VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp @ 45887

Last change on this file since 45887 was 45887, checked in by vboxsync, 12 years ago

VMM/HMVMXR0: todo for thinking about tomorrow.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 376.0 KB
1/* $Id: HMVMXR0.cpp 45887 2013-05-03 00:07:08Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HWVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#ifdef DEBUG_ramshankar
38#define HMVMX_SAVE_FULL_GUEST_STATE
39#define HMVMX_SYNC_FULL_GUEST_STATE
40#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
41#define HMVMX_ALWAYS_TRAP_PF
42#endif
43
44
45/*******************************************************************************
46* Defined Constants And Macros *
47*******************************************************************************/
48#define HMVMXHCUINTREG RTHCUINTREG
49#if defined(RT_ARCH_AMD64)
50# define HMVMX_IS_64BIT_HOST_MODE() (true)
51#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
52extern "C" uint32_t g_fVMXIs64bitHost;
53# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
54# undef HMVMXHCUINTREG
55# define HMVMXHCUINTREG uint64_t
56#else
57# define HMVMX_IS_64BIT_HOST_MODE() (false)
58#endif
59
60/** Use the function table. */
61#define HMVMX_USE_FUNCTION_TABLE
62
63/** This bit indicates the segment selector is unusable in VT-x. */
64#define HMVMX_SEL_UNUSABLE RT_BIT(16)
65
66/** Determine which tagged-TLB flush handler to use. */
67#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
68#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
69#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
70#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
71
72/** Updated-guest-state flags. */
73#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
74#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
75#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
76#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
77#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
78#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
79#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
80#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
81#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
82#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
83#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
84#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
85#define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12)
86#define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13)
87#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14)
88#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15)
89#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16)
90#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
91#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18)
92#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
93#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
94 | HMVMX_UPDATED_GUEST_RSP \
95 | HMVMX_UPDATED_GUEST_RFLAGS \
96 | HMVMX_UPDATED_GUEST_CR0 \
97 | HMVMX_UPDATED_GUEST_CR3 \
98 | HMVMX_UPDATED_GUEST_CR4 \
99 | HMVMX_UPDATED_GUEST_GDTR \
100 | HMVMX_UPDATED_GUEST_IDTR \
101 | HMVMX_UPDATED_GUEST_LDTR \
102 | HMVMX_UPDATED_GUEST_TR \
103 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
104 | HMVMX_UPDATED_GUEST_DEBUG \
105 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \
106 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \
107 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
108 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
109 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
110 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
111 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
112 | HMVMX_UPDATED_GUEST_APIC_STATE)
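/*
 * Editor's illustration (not part of the original file): these flags are meant to be
 * tested and set around lazy reads of guest state from the VMCS. A minimal sketch,
 * assuming the per-VCPU tracking field is named fUpdatedGuestState (an assumption here):
 */
#if 0
    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
    {
        RTGCUINTREG uVal = 0;
        int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);            /* Read the field once... */
        AssertRCReturn(rc, rc);
        pMixedCtx->rip = uVal;
        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;  /* ...then mark it as up to date. */
    }
#endif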
113
114/**
115 * Flags to skip redundant reads of some common VMCS fields that are not part of
116 * the guest-CPU state but are in the transient structure.
117 */
118#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
119#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
120#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
121#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
122#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
123#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
124
125/**
126 * Exception bitmap mask for real-mode guests (real-on-v86). We need to intercept all exceptions manually (except #PF).
127 * #NM is also handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted even in real-mode if
128 * we have Nested Paging support.
129 */
130#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
131 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
132 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
133 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
134 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
135 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
136 | RT_BIT(X86_XCPT_XF))
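/*
 * Editor's illustration (not part of the original file): a sketch of how this mask might
 * be merged into the exception bitmap for a real-on-v86 guest. The field names
 * u32XcptBitmap and RealMode.fRealOnV86Active are assumptions used for illustration only.
 */
#if 0
    uint32_t u32XcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
        u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;     /* Intercept everything except #NM/#PF (handled separately). */
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
    AssertRCReturn(rc, rc);
#endif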
137
138/**
139 * Exception bitmap mask for all contributory exceptions.
140 */
141#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
142 | RT_BIT(X86_XCPT_DE))
143
144/** Maximum VM-instruction error number. */
145#define HMVMX_INSTR_ERROR_MAX 28
146
147/** Profiling macro. */
148#ifdef HM_PROFILE_EXIT_DISPATCH
149# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
150# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
151#else
152# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
153# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
154#endif
155
156
157/*******************************************************************************
158* Structures and Typedefs *
159*******************************************************************************/
160/**
161 * A state structure for holding miscellaneous information across
162 * VMX non-root operation and restored after the transition.
163 */
164typedef struct VMXTRANSIENT
165{
166 /** The host's rflags/eflags. */
167 RTCCUINTREG uEFlags;
168#if HC_ARCH_BITS == 32
169 uint32_t u32Alignment0;
170#endif
171 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
172 uint64_t u64LStarMsr;
173 /** The guest's TPR value used for TPR shadowing. */
174 uint8_t u8GuestTpr;
175 /** Alignment. */
176 uint8_t abAlignment0[6];
177
178 /** The basic VM-exit reason. */
179 uint16_t uExitReason;
180 /** Alignment. */
181 uint16_t u16Alignment0;
182 /** The VM-exit interruption error code. */
183 uint32_t uExitIntrErrorCode;
184 /** The VM-exit exit qualification. */
185 RTGCUINTPTR uExitQualification;
186#if GC_ARCH_BITS == 32
187 /** Alignment. */
188 uint32_t u32Alignment1;
189#endif
190
191 /** The VM-exit interruption-information field. */
192 uint32_t uExitIntrInfo;
193 /** The VM-exit instruction-length field. */
194 uint32_t cbInstr;
195 /** Whether the VM-entry failed or not. */
196 bool fVMEntryFailed;
197 /** Alignment. */
198 uint8_t abAlignment1[5];
199
200 /** The VM-entry interruption-information field. */
201 uint32_t uEntryIntrInfo;
202 /** The VM-entry exception error code field. */
203 uint32_t uEntryXcptErrorCode;
204 /** The VM-entry instruction length field. */
205 uint32_t cbEntryInstr;
206
207 /** IDT-vectoring information field. */
208 uint32_t uIdtVectoringInfo;
209 /** IDT-vectoring error code. */
210 uint32_t uIdtVectoringErrorCode;
211
212 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
213 uint32_t fVmcsFieldsRead;
214 /** Whether TSC-offsetting should be set up before VM-entry. */
215 bool fUpdateTscOffsettingAndPreemptTimer;
216 /** Whether the VM-exit was caused by a page-fault during delivery of a
217 * contributory exception or a page-fault. */
218 bool fVectoringPF;
219} VMXTRANSIENT, *PVMXTRANSIENT;
220AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
221AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
222AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
223
224
225/**
226 * MSR-bitmap read permissions.
227 */
228typedef enum VMXMSREXITREAD
229{
230 /** Reading this MSR causes a VM-exit. */
231 VMXMSREXIT_INTERCEPT_READ = 0xb,
232 /** Reading this MSR does not cause a VM-exit. */
233 VMXMSREXIT_PASSTHRU_READ
234} VMXMSREXITREAD;
235
236/**
237 * MSR-bitmap write permissions.
238 */
239typedef enum VMXMSREXITWRITE
240{
241 /** Writing to this MSR causes a VM-exit. */
242 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
243 /** Writing to this MSR does not cause a VM-exit. */
244 VMXMSREXIT_PASSTHRU_WRITE
245} VMXMSREXITWRITE;
246
247
248/*******************************************************************************
249* Internal Functions *
250*******************************************************************************/
251static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
252static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
253 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
254#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
255static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
256#endif
257#ifndef HMVMX_USE_FUNCTION_TABLE
258DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
259#define HMVMX_EXIT_DECL static int
260#else
261#define HMVMX_EXIT_DECL static DECLCALLBACK(int)
262#endif
263
264HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
265HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
266HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
267HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
268HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
269HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
270HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
271HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
272HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
273HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
274HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
275HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
276HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
277HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
278HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
279HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
280HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
281HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
282HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
283HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
284HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
285HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
286HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
287HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
288HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
289HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
290HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
291HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
292HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
293HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
294HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
295HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
296HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
297HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
298HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
300HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
301HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
302HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
303HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
304HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
305HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
306HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
307HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
308
309static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
310static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
311static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
312static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
313static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
314static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
315static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321#ifdef HMVMX_USE_FUNCTION_TABLE
322/**
323 * VM-exit handler.
324 *
325 * @returns VBox status code.
326 * @param pVCpu Pointer to the VMCPU.
327 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
328 * out-of-sync. Make sure to update the required
329 * fields before using them.
330 * @param pVmxTransient Pointer to the VMX-transient structure.
331 */
332typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
333/** Pointer to VM-exit handler. */
334typedef FNVMEXITHANDLER *const PFNVMEXITHANDLER;
335
336/**
337 * VMX_EXIT dispatch table.
338 */
339static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
340{
341 /* 00 VMX_EXIT_XCPT_NMI */ hmR0VmxExitXcptNmi,
342 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
343 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
344 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
345 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
346 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
347 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
348 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
349 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
350 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
351 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
352 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
353 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
354 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
355 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
356 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
357 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
358 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
359 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitSetPendingXcptUD,
360 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
361 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
362 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
363 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
364 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
365 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
366 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
367 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
368 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
369 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
370 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
371 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
372 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
373 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
374 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
375 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
376 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
377 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
378 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
379 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
380 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
381 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
382 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
383 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
384 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
385 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
386 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
387 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
388 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
389 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
390 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
391 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
392 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
393 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
394 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
395 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
396 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
397 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
398 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
399 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
400 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
401};
402#endif /* HMVMX_USE_FUNCTION_TABLE */
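/*
 * Editor's illustration (not part of the original file): with HMVMX_USE_FUNCTION_TABLE
 * defined, the common VM-exit path would dispatch through the table above by indexing it
 * with the basic exit reason, roughly as sketched below (the surrounding call site is an
 * assumption):
 */
#if 0
    AssertMsg(pVmxTransient->uExitReason <= VMX_EXIT_MAX, ("Invalid exit reason %#x\n", pVmxTransient->uExitReason));
    int rc = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
#endif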
403
404#ifdef VBOX_STRICT
405static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
406{
407 /* 0 */ "(Not Used)",
408 /* 1 */ "VMCALL executed in VMX root operation.",
409 /* 2 */ "VMCLEAR with invalid physical address.",
410 /* 3 */ "VMCLEAR with VMXON pointer.",
411 /* 4 */ "VMLAUNCH with non-clear VMCS.",
412 /* 5 */ "VMRESUME with non-launched VMCS.",
413 /* 6 */ "VMRESUME after VMXOFF",
414 /* 7 */ "VM entry with invalid control fields.",
415 /* 8 */ "VM entry with invalid host state fields.",
416 /* 9 */ "VMPTRLD with invalid physical address.",
417 /* 10 */ "VMPTRLD with VMXON pointer.",
418 /* 11 */ "VMPTRLD with incorrect revision identifier.",
419 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
420 /* 13 */ "VMWRITE to read-only VMCS component.",
421 /* 14 */ "(Not Used)",
422 /* 15 */ "VMXON executed in VMX root operation.",
423 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
424 /* 17 */ "VM entry with non-launched executing VMCS.",
425 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
426 /* 19 */ "VMCALL with non-clear VMCS.",
427 /* 20 */ "VMCALL with invalid VM-exit control fields.",
428 /* 21 */ "(Not Used)",
429 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
430 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
431 /* 24 */ "VMCALL with invalid SMM-monitor features.",
432 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
433 /* 26 */ "VM entry with events blocked by MOV SS.",
434 /* 27 */ "(Not Used)",
435 /* 28 */ "Invalid operand to INVEPT/INVVPID."
436};
437#endif /* VBOX_STRICT */
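/*
 * Editor's illustration (not part of the original file): in strict builds the table above
 * would typically be used to log a human-readable VM-instruction error, for example:
 */
#if 0
    uint32_t const uInstrError = pVCpu->hm.s.vmx.lasterror.u32InstrError;
    if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
        Log(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
#endif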
438
439
440
441/**
442 * Updates the VM's last error record. If there was a VMX instruction error,
443 * reads the error data from the VMCS and updates the VCPU's last error record as
444 * well.
445 *
446 * @param pVM Pointer to the VM.
447 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
448 * VERR_VMX_UNABLE_TO_START_VM or
449 * VERR_VMX_INVALID_VMCS_FIELD).
450 * @param rc The error code.
451 */
452static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
453{
454 AssertPtr(pVM);
455 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
456 || rc == VERR_VMX_UNABLE_TO_START_VM)
457 {
458 AssertPtrReturnVoid(pVCpu);
459 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
460 }
461 pVM->hm.s.lLastError = rc;
462}
463
464
465/**
466 * Reads the VM-entry interruption-information field from the VMCS into the VMX
467 * transient structure.
468 *
469 * @returns VBox status code.
470 * @param pVmxTransient Pointer to the VMX transient structure.
471 *
472 * @remarks No-long-jump zone!!!
473 */
474DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
475{
476 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
477 AssertRCReturn(rc, rc);
478 return VINF_SUCCESS;
479}
480
481
482/**
483 * Reads the VM-entry exception error code field from the VMCS into
484 * the VMX transient structure.
485 *
486 * @returns VBox status code.
487 * @param pVmxTransient Pointer to the VMX transient structure.
488 *
489 * @remarks No-long-jump zone!!!
490 */
491DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
492{
493 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
494 AssertRCReturn(rc, rc);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Reads the VM-entry instruction length field from the VMCS into
501 * the VMX transient structure.
502 *
503 * @returns VBox status code.
504 * @param pVCpu Pointer to the VMCPU.
505 * @param pVmxTransient Pointer to the VMX transient structure.
506 *
507 * @remarks No-long-jump zone!!!
508 */
509DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
510{
511 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
512 AssertRCReturn(rc, rc);
513 return VINF_SUCCESS;
514}
515
516
517/**
518 * Reads the VM-exit interruption-information field from the VMCS into the VMX
519 * transient structure.
520 *
521 * @returns VBox status code.
522 * @param pVCpu Pointer to the VMCPU.
523 * @param pVmxTransient Pointer to the VMX transient structure.
524 */
525DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
526{
527 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
528 {
529 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
530 AssertRCReturn(rc, rc);
531 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
532 }
533 return VINF_SUCCESS;
534}
535
536
537/**
538 * Reads the VM-exit interruption error code from the VMCS into the VMX
539 * transient structure.
540 *
541 * @returns VBox status code.
542 * @param pVCpu Pointer to the VMCPU.
543 * @param pVmxTransient Pointer to the VMX transient structure.
544 */
545DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
546{
547 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
548 {
549 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
550 AssertRCReturn(rc, rc);
551 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
552 }
553 return VINF_SUCCESS;
554}
555
556
557/**
558 * Reads the VM-exit instruction length field from the VMCS into the VMX
559 * transient structure.
560 *
561 * @returns VBox status code.
562 * @param pVCpu Pointer to the VMCPU.
563 * @param pVmxTransient Pointer to the VMX transient structure.
564 */
565DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
566{
567 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
568 {
569 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
570 AssertRCReturn(rc, rc);
571 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
572 }
573 return VINF_SUCCESS;
574}
575
576
577/**
578 * Reads the exit qualification from the VMCS into the VMX transient structure.
579 *
580 * @returns VBox status code.
581 * @param pVCpu Pointer to the VMCPU.
582 * @param pVmxTransient Pointer to the VMX transient structure.
583 */
584DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
585{
586 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
587 {
588 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
589 AssertRCReturn(rc, rc);
590 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
591 }
592 return VINF_SUCCESS;
593}
594
595
596/**
597 * Reads the IDT-vectoring information field from the VMCS into the VMX
598 * transient structure.
599 *
600 * @returns VBox status code.
601 * @param pVmxTransient Pointer to the VMX transient structure.
602 *
603 * @remarks No-long-jump zone!!!
604 */
605DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
606{
607 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
608 {
609 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
610 AssertRCReturn(rc, rc);
611 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
612 }
613 return VINF_SUCCESS;
614}
615
616
617/**
618 * Reads the IDT-vectoring error code from the VMCS into the VMX
619 * transient structure.
620 *
621 * @returns VBox status code.
622 * @param pVmxTransient Pointer to the VMX transient structure.
623 */
624DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
625{
626 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
627 {
628 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
629 AssertRCReturn(rc, rc);
630 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
631 }
632 return VINF_SUCCESS;
633}
634
635
636/**
637 * Enters VMX root mode operation on the current CPU.
638 *
639 * @returns VBox status code.
640 * @param pVM Pointer to the VM (optional, can be NULL, after
641 * a resume).
642 * @param HCPhysCpuPage Physical address of the VMXON region.
643 * @param pvCpuPage Pointer to the VMXON region.
644 */
645static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
646{
647 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
648 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
649 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
650
651 if (pVM)
652 {
653 /* Write the VMCS revision dword to the VMXON region. */
654 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
655 }
656
657 /* Enable the VMX bit in CR4 if necessary. */
658 RTCCUINTREG uCr4 = ASMGetCR4();
659 if (!(uCr4 & X86_CR4_VMXE))
660 ASMSetCR4(uCr4 | X86_CR4_VMXE);
661
662 /* Enter VMX root mode. */
663 int rc = VMXEnable(HCPhysCpuPage);
664 if (RT_FAILURE(rc))
665 ASMSetCR4(uCr4);
666
667 return rc;
668}
669
670
671/**
672 * Exits VMX root mode operation on the current CPU.
673 *
674 * @returns VBox status code.
675 */
676static int hmR0VmxLeaveRootMode(void)
677{
678 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
679
680 /* If we're for some reason not in VMX root mode, then don't leave it. */
681 if (ASMGetCR4() & X86_CR4_VMXE)
682 {
683 /* Exit VMX root mode and clear the VMX bit in CR4 */
684 VMXDisable();
685 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
686 return VINF_SUCCESS;
687 }
688
689 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
690}
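/*
 * Editor's illustration (not part of the original file): the enter/leave helpers above are
 * expected to be used as a pair with preemption disabled, roughly like this (variable
 * names are assumptions):
 */
#if 0
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);
    int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
    if (RT_SUCCESS(rc))
    {
        /* ...work that requires VMX root mode... */
        hmR0VmxLeaveRootMode();
    }
    RTThreadPreemptEnable(&PreemptState);
#endif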
691
692
693/**
694 * Allocates and maps one physically contiguous page. The allocated page is
695 * zeroed out (used by various VT-x structures).
696 *
697 * @returns IPRT status code.
698 * @param pMemObj Pointer to the ring-0 memory object.
699 * @param ppVirt Where to store the virtual address of the
700 * allocation.
701 * @param pHCPhys Where to store the physical address of the
702 * allocation.
703 */
704DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
705{
706 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
707 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
708 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
709
710 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
711 if (RT_FAILURE(rc))
712 return rc;
713 *ppVirt = RTR0MemObjAddress(*pMemObj);
714 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
715 ASMMemZero32(*ppVirt, PAGE_SIZE);
716 return VINF_SUCCESS;
717}
718
719
720/**
721 * Frees and unmaps an allocated physical page.
722 *
723 * @param pMemObj Pointer to the ring-0 memory object.
724 * @param ppVirt Where the virtual address of the allocation is stored;
725 * re-initialized to 0 on return.
726 * @param pHCPhys Where the physical address of the allocation is stored;
727 * re-initialized to 0 on return.
728 */
729DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
730{
731 AssertPtr(pMemObj);
732 AssertPtr(ppVirt);
733 AssertPtr(pHCPhys);
734 if (*pMemObj != NIL_RTR0MEMOBJ)
735 {
736 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
737 AssertRC(rc);
738 *pMemObj = NIL_RTR0MEMOBJ;
739 *ppVirt = 0;
740 *pHCPhys = 0;
741 }
742}
743
744
745/**
746 * Worker function to free VT-x related structures.
747 *
749 * @param pVM Pointer to the VM.
750 */
751static void hmR0VmxStructsFree(PVM pVM)
752{
753 for (VMCPUID i = 0; i < pVM->cCpus; i++)
754 {
755 PVMCPU pVCpu = &pVM->aCpus[i];
756 AssertPtr(pVCpu);
757
758#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
759 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
760 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
761#endif
762
763 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
764 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
765
766 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
767 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
768 }
769
770 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
771#ifdef VBOX_WITH_CRASHDUMP_MAGIC
772 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
773#endif
774}
775
776
777/**
778 * Worker function to allocate VT-x related VM structures.
779 *
780 * @returns IPRT status code.
781 * @param pVM Pointer to the VM.
782 */
783static int hmR0VmxStructsAlloc(PVM pVM)
784{
785 /*
786 * Initialize members up-front so we can cleanup properly on allocation failure.
787 */
788#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
789 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
790 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
791 pVM->hm.s.vmx.HCPhys##a_Name = 0;
792
793#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
794 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
795 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
796 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
797
798#ifdef VBOX_WITH_CRASHDUMP_MAGIC
799 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
800#endif
801 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
802
803 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
804 for (VMCPUID i = 0; i < pVM->cCpus; i++)
805 {
806 PVMCPU pVCpu = &pVM->aCpus[i];
807 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
808 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
809 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
810#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
811 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
812 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
813#endif
814 }
815#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
816#undef VMXLOCAL_INIT_VM_MEMOBJ
817
818 /*
819 * Allocate all the VT-x structures.
820 */
821 int rc = VINF_SUCCESS;
822#ifdef VBOX_WITH_CRASHDUMP_MAGIC
823 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
824 if (RT_FAILURE(rc))
825 goto cleanup;
826 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
827 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
828#endif
829
830 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
831 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
832 {
833 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
834 &pVM->hm.s.vmx.HCPhysApicAccess);
835 if (RT_FAILURE(rc))
836 goto cleanup;
837 }
838
839 /*
840 * Initialize per-VCPU VT-x structures.
841 */
842 for (VMCPUID i = 0; i < pVM->cCpus; i++)
843 {
844 PVMCPU pVCpu = &pVM->aCpus[i];
845 AssertPtr(pVCpu);
846
847 /* Allocate the VM control structure (VMCS). */
848 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
849 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
850 if (RT_FAILURE(rc))
851 goto cleanup;
852
853 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
854 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
855 {
856 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
857 &pVCpu->hm.s.vmx.HCPhysVirtApic);
858 if (RT_FAILURE(rc))
859 goto cleanup;
860 }
861
862 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
863 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
864 {
865 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
866 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
867 if (RT_FAILURE(rc))
868 goto cleanup;
869 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
870 }
871
872#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
873 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
874 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
875 if (RT_FAILURE(rc))
876 goto cleanup;
877
878 /* Allocate the VM-exit MSR-load page for the host MSRs. */
879 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
880 if (RT_FAILURE(rc))
881 goto cleanup;
882#endif
883 }
884
885 return VINF_SUCCESS;
886
887cleanup:
888 hmR0VmxStructsFree(pVM);
889 return rc;
890}
891
892
893/**
894 * Does global VT-x initialization (called during module initialization).
895 *
896 * @returns VBox status code.
897 */
898VMMR0DECL(int) VMXR0GlobalInit(void)
899{
900#ifdef HMVMX_USE_FUNCTION_TABLE
901 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
902# ifdef VBOX_STRICT
903 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
904 Assert(g_apfnVMExitHandlers[i]);
905# endif
906#endif
907 return VINF_SUCCESS;
908}
909
910
911/**
912 * Does global VT-x termination (called during module termination).
913 */
914VMMR0DECL(void) VMXR0GlobalTerm()
915{
916 /* Nothing to do currently. */
917}
918
919
920/**
921 * Sets up and activates VT-x on the current CPU.
922 *
923 * @returns VBox status code.
924 * @param pCpu Pointer to the global CPU info struct.
925 * @param pVM Pointer to the VM (can be NULL after a host resume
926 * operation).
927 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
928 * fEnabledByHost is true).
929 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
930 * @a fEnabledByHost is true).
931 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
932 * enable VT-x/AMD-V on the host.
933 */
934VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
935{
936 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
937 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
938
939 if (!fEnabledByHost)
940 {
941 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
942 if (RT_FAILURE(rc))
943 return rc;
944 }
945
946 /*
947 * Flush all VPIDs (in case we or any other hypervisor has been using VPIDs) so that
948 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
949 * each time a VPID is reused after hitting the MaxASID limit once.
950 */
951 if ( pVM
952 && pVM->hm.s.vmx.fVpid
953 && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
954 {
955 hmR0VmxFlushVpid(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
956 pCpu->fFlushAsidBeforeUse = false;
957 }
958 else
959 pCpu->fFlushAsidBeforeUse = true;
960
961 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
962 ++pCpu->cTlbFlushes;
963
964 return VINF_SUCCESS;
965}
966
967
968/**
969 * Deactivates VT-x on the current CPU.
970 *
971 * @returns VBox status code.
972 * @param pCpu Pointer to the global CPU info struct.
973 * @param pvCpuPage Pointer to the VMXON region.
974 * @param HCPhysCpuPage Physical address of the VMXON region.
975 */
976VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
977{
978 NOREF(pCpu);
979 NOREF(pvCpuPage);
980 NOREF(HCPhysCpuPage);
981
982 hmR0VmxLeaveRootMode();
983 return VINF_SUCCESS;
984}
985
986
987/**
988 * Sets the permission bits for the specified MSR in the MSR bitmap.
989 *
990 * @param pVCpu Pointer to the VMCPU.
991 * @param uMsr The MSR value.
992 * @param enmRead Whether reading this MSR causes a VM-exit.
993 * @param enmWrite Whether writing this MSR causes a VM-exit.
994 */
995static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
996{
997 int32_t iBit;
998 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
999
1000 /*
1001 * Layout:
1002 * 0x000 - 0x3ff - Low MSR read bits
1003 * 0x400 - 0x7ff - High MSR read bits
1004 * 0x800 - 0xbff - Low MSR write bits
1005 * 0xc00 - 0xfff - High MSR write bits
1006 */
1007 if (uMsr <= 0x00001FFF)
1008 iBit = uMsr;
1009 else if ( uMsr >= 0xC0000000
1010 && uMsr <= 0xC0001FFF)
1011 {
1012 iBit = (uMsr - 0xC0000000);
1013 pbMsrBitmap += 0x400;
1014 }
1015 else
1016 {
1017 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1018 return;
1019 }
1020
1021 Assert(iBit <= 0x1fff);
1022 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1023 ASMBitSet(pbMsrBitmap, iBit);
1024 else
1025 ASMBitClear(pbMsrBitmap, iBit);
1026
1027 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1028 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1029 else
1030 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1031}
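/*
 * Editor's illustration (not part of the original file): typical usage of the helper above
 * is to let the guest access a frequently used MSR without VM-exits; the specific MSR
 * chosen here is only an example:
 */
#if 0
    hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
#endif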
1032
1033
1034/**
1035 * Flushes the TLB using EPT.
1036 *
1038 * @param pVM Pointer to the VM.
1039 * @param pVCpu Pointer to the VMCPU.
1040 * @param enmFlush Type of flush.
1041 */
1042static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
1043{
1044 AssertPtr(pVM);
1045 Assert(pVM->hm.s.fNestedPaging);
1046
1047 LogFlowFunc(("pVM=%p pVCpu=%p enmFlush=%d\n", pVM, pVCpu, enmFlush));
1048
1049 uint64_t descriptor[2];
1050 descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1051 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1052
1053 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1054 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu->hm.s.vmx.HCPhysEPTP, rc));
1055 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1056}
1057
1058
1059/**
1060 * Flushes the TLB using VPID.
1061 *
1063 * @param pVM Pointer to the VM.
1064 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1065 * enmFlush).
1066 * @param enmFlush Type of flush.
1067 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1068 * on @a enmFlush).
1069 */
1070static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1071{
1072 AssertPtr(pVM);
1073 Assert(pVM->hm.s.vmx.fVpid);
1074
1075 uint64_t descriptor[2];
1076 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1077 {
1078 descriptor[0] = 0;
1079 descriptor[1] = 0;
1080 }
1081 else
1082 {
1083 AssertPtr(pVCpu);
1084 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1085 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1086 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1087 descriptor[1] = GCPtr;
1088 }
1089
1090 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1091 AssertMsg(rc == VINF_SUCCESS,
1092 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1093 if ( RT_SUCCESS(rc)
1094 && pVCpu)
1095 {
1096 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1097 }
1098}
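/*
 * Editor's note (not part of the original file): the 128-bit INVVPID descriptor built above
 * follows the Intel SDM layout -- bits 15:0 hold the VPID, bits 63:16 are reserved (zero),
 * and bits 127:64 hold the linear address used for individual-address flushes.
 */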
1099
1100
1101/**
1102 * Invalidates a guest page by guest virtual address. Only relevant for
1103 * EPT/VPID, otherwise there is nothing really to invalidate.
1104 *
1105 * @returns VBox status code.
1106 * @param pVM Pointer to the VM.
1107 * @param pVCpu Pointer to the VMCPU.
1108 * @param GCVirt Guest virtual address of the page to invalidate.
1109 */
1110VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1111{
1112 AssertPtr(pVM);
1113 AssertPtr(pVCpu);
1114 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1115
1116 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1117 if (!fFlushPending)
1118 {
1119 /*
1120 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1121 * See @bugref{6043} and @bugref{6177}.
1122 *
1123 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1124 * function may be called in a loop with individual addresses.
1125 */
1126 if (pVM->hm.s.vmx.fVpid)
1127 {
1128 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1129 {
1130 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1131 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1132 }
1133 else
1134 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1135 }
1136 else if (pVM->hm.s.fNestedPaging)
1137 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1138 }
1139
1140 return VINF_SUCCESS;
1141}
1142
1143
1144/**
1145 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1146 * otherwise there is nothing really to invalidate.
1147 *
1148 * @returns VBox status code.
1149 * @param pVM Pointer to the VM.
1150 * @param pVCpu Pointer to the VMCPU.
1151 * @param GCPhys Guest physical address of the page to invalidate.
1152 */
1153VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1154{
1155 LogFlowFunc(("%RGp\n", GCPhys));
1156
1157 /*
1158 * We cannot flush a page by guest-physical address. INVVPID takes only a linear address, while INVEPT flushes only
1159 * by EPT context and not by individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
1160 * This function might be called in a loop.
1161 */
1162 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1163 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1164 return VINF_SUCCESS;
1165}
1166
1167
1168/**
1169 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1170 * case where neither EPT nor VPID is supported by the CPU.
1171 *
1172 * @param pVM Pointer to the VM.
1173 * @param pVCpu Pointer to the VMCPU.
1174 *
1175 * @remarks Called with interrupts disabled.
1176 */
1177static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1178{
1179 NOREF(pVM);
1180 AssertPtr(pVCpu);
1181 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1182 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1183
1184 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1185 AssertPtr(pCpu);
1186
1187 pVCpu->hm.s.TlbShootdown.cPages = 0;
1188 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1189 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1190 pVCpu->hm.s.fForceTLBFlush = false;
1191 return;
1192}
1193
1194
1195/**
1196 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1197 *
1198 * @param pVM Pointer to the VM.
1199 * @param pVCpu Pointer to the VMCPU.
1200 * @remarks All references to "ASID" in this function pertain to "VPID" in
1201 * Intel's nomenclature. The reason is to avoid confusion in comparison
1202 * statements, since the host-CPU copies are named "ASID".
1203 *
1204 * @remarks Called with interrupts disabled.
1205 */
1206static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1207{
1208 AssertPtr(pVM);
1209 AssertPtr(pVCpu);
1210 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1211 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1212 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1213
1214 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1215 AssertPtr(pCpu);
1216
1217 /*
1218 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1219 * This can happen both for start & resume due to long jumps back to ring-3.
1220 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1221 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1222 */
1223 bool fNewASID = false;
1224 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1225 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1226 {
1227 pVCpu->hm.s.fForceTLBFlush = true;
1228 fNewASID = true;
1229 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1230 }
1231
1232 /*
1233 * Check for explicit TLB shootdowns.
1234 */
1235 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1236 {
1237 pVCpu->hm.s.fForceTLBFlush = true;
1238 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1239 }
1240
1241 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1242 if (pVCpu->hm.s.fForceTLBFlush)
1243 {
1244 if (fNewASID)
1245 {
1246 ++pCpu->uCurrentAsid;
1247 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1248 {
1249 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1250 pCpu->cTlbFlushes++;
1251 pCpu->fFlushAsidBeforeUse = true;
1252 }
1253
1254 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1255 if (pCpu->fFlushAsidBeforeUse)
1256 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1257 }
1258 else
1259 {
1260 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1261 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
1262 else
1263 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1264 }
1265
1266 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1267 pVCpu->hm.s.fForceTLBFlush = false;
1268 }
1269 else
1270 {
1271 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1272 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1273 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1274 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1275
1276 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1277 * not be executed. See hmQueueInvlPage() where it is commented
1278 * out. Support individual entry flushing someday. */
1279 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1280 {
1281 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1282
1283 /*
1284 * Flush individual guest entries from the TLB using VPID, or flush as little as possible
1285 * with EPT, as supported by the CPU.
1286 */
1287 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1288 {
1289 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1290 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1291 }
1292 else
1293 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1294 }
1295 else
1296 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1297 }
1298 pVCpu->hm.s.TlbShootdown.cPages = 0;
1299 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1300
1301 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1302 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1303 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1304 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1305 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1306 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1307
1308 /* Update VMCS with the VPID. */
1309 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1310 AssertRC(rc);
1311}
1312
1313
1314/**
1315 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1316 *
1318 * @param pVM Pointer to the VM.
1319 * @param pVCpu Pointer to the VMCPU.
1320 *
1321 * @remarks Called with interrupts disabled.
1322 */
1323static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1324{
1325 AssertPtr(pVM);
1326 AssertPtr(pVCpu);
1327 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1328 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1329
1330 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1331 AssertPtr(pCpu);
1332
1333 /*
1334 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1335 * This can happen both for start & resume due to long jumps back to ring-3.
1336 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1337 */
1338 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1339 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1340 {
1341 pVCpu->hm.s.fForceTLBFlush = true;
1342 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1343 }
1344
1345 /* Check for explicit TLB shootdown flushes. */
1346 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1347 {
1348 pVCpu->hm.s.fForceTLBFlush = true;
1349 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1350 }
1351
1352 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1353 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1354
1355 if (pVCpu->hm.s.fForceTLBFlush)
1356 {
1357 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1358 pVCpu->hm.s.fForceTLBFlush = false;
1359 }
1360 else
1361 {
1362 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1363 * not be executed. See hmQueueInvlPage() where it is commented
1364 * out. Support individual entry flushing someday. */
1365 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1366 {
1367 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1368 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1369 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1370 }
1371 else
1372 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1373 }
1374
1375 pVCpu->hm.s.TlbShootdown.cPages = 0;
1376 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1377}
1378
1379
1380/**
1381 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1382 *
1384 * @param pVM Pointer to the VM.
1385 * @param pVCpu Pointer to the VMCPU.
1386 *
1387 * @remarks Called with interrupts disabled.
1388 */
1389static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1390{
1391 AssertPtr(pVM);
1392 AssertPtr(pVCpu);
1393 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1394 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1395
1396 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1397
1398 /*
1399 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1400 * This can happen both for start & resume due to long jumps back to ring-3.
1401 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1402 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1403 */
1404 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1405 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1406 {
1407 pVCpu->hm.s.fForceTLBFlush = true;
1408 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1409 }
1410
1411 /* Check for explicit TLB shootdown flushes. */
1412 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1413 {
1414 /*
1415 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1416 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1417 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
1418 */
1419 pVCpu->hm.s.fForceTLBFlush = true;
1420 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1421 }
1422
1423 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1424 if (pVCpu->hm.s.fForceTLBFlush)
1425 {
1426 ++pCpu->uCurrentAsid;
1427 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1428 {
1429 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1430 pCpu->fFlushAsidBeforeUse = true;
1431 pCpu->cTlbFlushes++;
1432 }
1433
1434 pVCpu->hm.s.fForceTLBFlush = false;
1435 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1436 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1437 if (pCpu->fFlushAsidBeforeUse)
1438 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1439 }
1440 else
1441 {
1442 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1443 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1444 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1445 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1446
1447 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1448 * not be executed. See hmQueueInvlPage() where it is commented
1449 * out. Support individual entry flushing someday. */
1450 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1451 {
1452 /* Flush individual guest entries from the TLB using VPID, or flush as little as possible with EPT, as supported by the CPU. */
1453 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1454 {
1455 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1456 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1457 }
1458 else
1459 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1460 }
1461 else
1462 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1463 }
1464
1465 pVCpu->hm.s.TlbShootdown.cPages = 0;
1466 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1467
1468 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1469 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1470 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1471 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1472 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1473 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1474
1475 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1476 AssertRC(rc);
1477}
1478
1479
1480/**
1481 * Flushes the guest TLB entries based on CPU capabilities.
1482 *
1483 * @param pVCpu Pointer to the VMCPU.
1484 */
1485DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1486{
1487 PVM pVM = pVCpu->CTX_SUFF(pVM);
1488 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1489 {
1490 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1491 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1492 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1493 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1494 default:
1495 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1496 break;
1497 }
1498}
1499
1500
1501/**
1502 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1503 * TLB entries from the host TLB before VM-entry.
1504 *
1505 * @returns VBox status code.
1506 * @param pVM Pointer to the VM.
1507 */
1508static int hmR0VmxSetupTaggedTlb(PVM pVM)
1509{
1510 /*
1511 * Determine optimal flush type for nested paging.
1512 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we have already set up unrestricted
1513 * guest execution (see hmR3InitFinalizeR0()).
1514 */
1515 if (pVM->hm.s.fNestedPaging)
1516 {
1517 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1518 {
1519 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1520 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1521 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1522 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1523 else
1524 {
1525 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
1526 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1527 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1528 }
1529
1530 /* Make sure the write-back cacheable memory type for EPT is supported. */
1531 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1532 {
1533 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1534 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1535 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1536 }
1537 }
1538 else
1539 {
1540 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1541 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1542 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1543 }
1544 }
1545
1546 /*
1547 * Determine optimal flush type for VPID.
1548 */
1549 if (pVM->hm.s.vmx.fVpid)
1550 {
1551 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1552 {
1553 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1554 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1555 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1556 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1557 else
1558 {
1559 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
1560 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1561 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1562 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1563 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1564 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1565 pVM->hm.s.vmx.fVpid = false;
1566 }
1567 }
1568 else
1569 {
1570 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1571 Log(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
1572 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1573 pVM->hm.s.vmx.fVpid = false;
1574 }
1575 }
1576
1577 /*
1578 * Set up the handler for flushing tagged TLBs.
1579 */
1580 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1581 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1582 else if (pVM->hm.s.fNestedPaging)
1583 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1584 else if (pVM->hm.s.vmx.fVpid)
1585 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1586 else
1587 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1588 return VINF_SUCCESS;
1589}
1590
1591
1592/**
1593 * Sets up pin-based VM-execution controls in the VMCS.
1594 *
1595 * @returns VBox status code.
1596 * @param pVM Pointer to the VM.
1597 * @param pVCpu Pointer to the VMCPU.
1598 */
1599static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1600{
1601 AssertPtr(pVM);
1602 AssertPtr(pVCpu);
1603
1604 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1605 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1606
1607 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
1608 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT; /* Non-maskable interrupts cause a VM-exit. */
1609 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI));
1610
1611 /* Enable the VMX preemption timer. */
1612 if (pVM->hm.s.vmx.fUsePreemptTimer)
1613 {
1614 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER);
1615 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
1616 }
1617
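/* Every bit we want set in 'val' must also be allowed to be 1 by the CPU (i.e. remain set after masking with
   'zap'/allowed1); a mismatch means the CPU doesn't support this feature combination. */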
1618 if ((val & zap) != val)
1619 {
1620 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1621 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1622 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1623 }
1624
1625 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);
1626 AssertRCReturn(rc, rc);
1627
1628 /* Update VCPU with the currently set pin-based VM-execution controls. */
1629 pVCpu->hm.s.vmx.u32PinCtls = val;
1630 return rc;
1631}
1632
1633
1634/**
1635 * Sets up processor-based VM-execution controls in the VMCS.
1636 *
1637 * @returns VBox status code.
1638 * @param pVM Pointer to the VM.
1639 * @param pVCpu Pointer to the VMCPU.
1640 */
1641static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1642{
1643 AssertPtr(pVM);
1644 AssertPtr(pVCpu);
1645
1646 int rc = VERR_INTERNAL_ERROR_5;
1647 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1648 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1649
1650 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT /* HLT causes a VM-exit. */
1651 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1652 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1653 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1654 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1655 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1656 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1657
1658 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT later, so check that the CPU allows it to be both set and cleared (i.e. it isn't pinned either way). */
1659 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
1660 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT))
1661 {
1662 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT combo!"));
1663 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1664 }
1665
1666 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1667 if (!pVM->hm.s.fNestedPaging)
1668 {
1669 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1670 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
1671 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
1672 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
1673 }
1674
1675 /* Use TPR shadowing if supported by the CPU. */
1676 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
1677 {
1678 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1679 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1680 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1681 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1682 AssertRCReturn(rc, rc);
1683
1684 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1685 /* CR8 writes cause a VM-exit based on the TPR threshold. */
1686 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT));
1687 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT));
1688 }
1689 else
1690 {
1691 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
1692 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
1693 }
1694
1695 /* Use MSR-bitmaps if supported by the CPU. */
1696 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
1697 {
1698 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
1699
1700 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1701 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1702 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1703 AssertRCReturn(rc, rc);
1704
1705 /*
1706 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1707 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1708 */
1709 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1710 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1711 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1712 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1713 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1714 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1715 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1716 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1717 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1718 }
1719
1720 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1721 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1722 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1723
1724 if ((val & zap) != val)
1725 {
1726 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1727 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1728 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1729 }
1730
1731 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);
1732 AssertRCReturn(rc, rc);
1733
1734 /* Update VCPU with the currently set processor-based VM-execution controls. */
1735 pVCpu->hm.s.vmx.u32ProcCtls = val;
1736
1737 /*
1738 * Secondary processor-based VM-execution controls.
1739 */
1740 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1741 {
1742 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1743 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1744
1745 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1746 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1747
1748 if (pVM->hm.s.fNestedPaging)
1749 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1750 else
1751 {
1752 /*
1753 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1754 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT when INVPCID is executed by the guest.
1755 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1756 */
1757 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1758 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1759 }
1760
1761 if (pVM->hm.s.vmx.fVpid)
1762 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1763
1764 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1765 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1766
1767 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1768 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1769 * done dynamically. */
1770 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1771 {
1772 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1773 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1774 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1775 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1776 AssertRCReturn(rc, rc);
1777 }
1778
1779 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1780 {
1781 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1782 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
1783 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1784 }
1785
1786 if ((val & zap) != val)
1787 {
1788 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1789 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1790 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1791 }
1792
1793 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);
1794 AssertRCReturn(rc, rc);
1795
1796 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1797 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1798 }
1799
1800 return VINF_SUCCESS;
1801}
1802
1803
1804/**
1805 * Sets up miscellaneous (everything other than Pin & Processor-based
1806 * VM-execution) control fields in the VMCS.
1807 *
1808 * @returns VBox status code.
1809 * @param pVM Pointer to the VM.
1810 * @param pVCpu Pointer to the VMCPU.
1811 */
1812static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1813{
1814 AssertPtr(pVM);
1815 AssertPtr(pVCpu);
1816
1817 int rc = VERR_GENERAL_FAILURE;
1818
1819 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs()). */
1820 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
1821
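/* Initial TSC offset of 0; with VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING set (see hmR0VmxSetupProcCtls)
   the offset is presumably updated elsewhere before each VM-entry. */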
1822 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
1823
1824 /*
1825 * Set MASK & MATCH to 0. VMX checks whether (GuestPFErrCode & MASK) == MATCH. If it is equal (and in our case
1826 * it always is) and the X86_XCPT_PF bit in the exception bitmap is set, a #PF causes a VM-exit; if the bit is
1827 * clear, it doesn't. We thus use the exception bitmap alone to control #PF intercepts rather than both mechanisms.
1828 */
1829 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
1830 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
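/* Example: with MASK = MATCH = 0, (ErrCd & 0) == 0 always holds, so whether a guest #PF reaches us is decided
   solely by the X86_XCPT_PF bit in the exception bitmap (see hmR0VmxInitXcptBitmap). */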
1831
1832 /** @todo Explore possibility of using IO-bitmaps. */
1833 /* All IO & IOIO instructions cause VM-exits. */
1834 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
1835 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
1836
1837#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1838 /* Setup MSR autoloading/autostoring. */
1839 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1840 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1841 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1842 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1843 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
1844 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
1845
1846 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1847 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1848 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1849 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
1850#else
1851 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
1852 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
1853 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
1854#endif
1855
1856 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1857 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1858
1859 /* Set up debug controls. */
1860 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1861 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1862 AssertRCReturn(rc, rc);
1863 return rc;
1864}
1865
1866
1867/**
1868 * Sets up the initial exception bitmap in the VMCS based on static conditions
1869 * (i.e. conditions that cannot ever change at runtime).
1870 *
1871 * @returns VBox status code.
1872 * @param pVM Pointer to the VM.
1873 * @param pVCpu Pointer to the VMCPU.
1874 */
1875static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1876{
1877 AssertPtr(pVM);
1878 AssertPtr(pVCpu);
1879
1880 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1881
1882 uint32_t u32XcptBitmap = 0;
1883
1884 /* Without nested paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
1885 if (!pVM->hm.s.fNestedPaging)
1886 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
1887
1888 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
1889 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1890 AssertRCReturn(rc, rc);
1891 return rc;
1892}
1893
1894
1895/**
1896 * Sets up the initial guest-state mask. The guest-state mask is consulted
1897 * before reading guest-state fields from the VMCS as VMREADs can be expensive
1898 * for the nested-virtualization case (where each VMREAD would cause a VM-exit).
1899 *
1900 * @param pVCpu Pointer to the VMCPU.
1901 */
1902static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
1903{
1904 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
1905 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
1906 return VINF_SUCCESS;
1907}
1908
1909
1910/**
1911 * Does per-VM VT-x initialization.
1912 *
1913 * @returns VBox status code.
1914 * @param pVM Pointer to the VM.
1915 */
1916VMMR0DECL(int) VMXR0InitVM(PVM pVM)
1917{
1918 LogFlowFunc(("pVM=%p\n", pVM));
1919
1920 int rc = hmR0VmxStructsAlloc(pVM);
1921 if (RT_FAILURE(rc))
1922 {
1923 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
1924 return rc;
1925 }
1926
1927 return VINF_SUCCESS;
1928}
1929
1930
1931/**
1932 * Does per-VM VT-x termination.
1933 *
1934 * @returns VBox status code.
1935 * @param pVM Pointer to the VM.
1936 */
1937VMMR0DECL(int) VMXR0TermVM(PVM pVM)
1938{
1939 LogFlowFunc(("pVM=%p\n", pVM));
1940
1941#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1942 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
1943 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
1944#endif
1945 hmR0VmxStructsFree(pVM);
1946 return VINF_SUCCESS;
1947}
1948
1949
1950/**
1951 * Sets up the VM for execution under VT-x.
1952 * This function is only called once per VM during initialization.
1953 *
1954 * @returns VBox status code.
1955 * @param pVM Pointer to the VM.
1956 */
1957VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
1958{
1959 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
1960 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1961
1962 LogFlowFunc(("pVM=%p\n", pVM));
1963
1964 /*
1965 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
1966 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
1967 */
1968 /* -XXX- change hmR3InitFinalizeR0() to fail if pRealModeTSS alloc fails. */
1969 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
1970 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
1971 || !pVM->hm.s.vmx.pRealModeTSS))
1972 {
1973 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
1974 return VERR_INTERNAL_ERROR;
1975 }
1976
1977 /* Initialize these always, see hmR3InitFinalizeR0(). */
1978 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
1979 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
1980
1981 /* Setup the tagged-TLB flush handlers. */
1982 int rc = hmR0VmxSetupTaggedTlb(pVM);
1983 if (RT_FAILURE(rc))
1984 {
1985 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
1986 return rc;
1987 }
1988
1989 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1990 {
1991 PVMCPU pVCpu = &pVM->aCpus[i];
1992 AssertPtr(pVCpu);
1993 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
1994
1995 /* Set revision dword at the beginning of the VMCS structure. */
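/* The revision identifier is taken from IA32_VMX_BASIC; VMPTRLD fails if the region's identifier doesn't match
   the one the CPU reports. */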
1996 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
1997
1998 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
1999 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2000 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2001 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2002
2003 /* Load this VMCS as the current VMCS. */
2004 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2005 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2006 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2007
2008 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2009 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2010 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2011
2012 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2013 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2014 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2015
2016 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2017 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2018 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2019
2020 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2021 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2022 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2023
2024 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2025 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2026 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2027
2028#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2029 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2030 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2031 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2032#endif
2033
2034 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2035 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2036 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2037 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2038
2039 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2040 }
2041
2042 return VINF_SUCCESS;
2043}
2044
2045
2046/**
2047 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2048 * the VMCS.
2049 *
2050 * @returns VBox status code.
2051 * @param pVM Pointer to the VM.
2052 * @param pVCpu Pointer to the VMCPU.
2053 */
2054DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2055{
2056 RTCCUINTREG uReg = ASMGetCR0();
2057 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2058
2059#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2060 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2061 if (HMVMX_IS_64BIT_HOST_MODE())
2062 {
2063 uint64_t uRegCR3 = hmR0Get64bitCR3();
2064 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2065 }
2066 else
2067#endif
2068 {
2069 uReg = ASMGetCR3();
2070 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2071 }
2072
2073 uReg = ASMGetCR4();
2074 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2075 AssertRCReturn(rc, rc);
2076 return rc;
2077}
2078
2079
2080/**
2081 * Saves the host segment registers, GDTR, IDTR and the TR, FS and GS bases into
2082 * the host-state area in the VMCS.
2083 *
2084 * @returns VBox status code.
2085 * @param pVM Pointer to the VM.
2086 * @param pVCpu Pointer to the VMCPU.
2087 */
2088DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2089{
2090 int rc = VERR_INTERNAL_ERROR_5;
2091 RTSEL uSelCS = 0;
2092 RTSEL uSelSS = 0;
2093 RTSEL uSelDS = 0;
2094 RTSEL uSelES = 0;
2095 RTSEL uSelFS = 0;
2096 RTSEL uSelGS = 0;
2097 RTSEL uSelTR = 0;
2098
2099 /*
2100 * Host Selector registers.
2101 */
2102#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2103 if (HMVMX_IS_64BIT_HOST_MODE())
2104 {
2105 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2106 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2107 }
2108 else
2109 {
2110 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2111 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2112 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2113 }
2114#else
2115 uSelCS = ASMGetCS();
2116 uSelSS = ASMGetSS();
2117#endif
2118
2119 /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */
2120 uSelTR = ASMGetTR();
2121
2122 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2123 /** @todo Verify if we have any platform that actually runs with DS or ES with
2124 * RPL != 0 in kernel space. */
2125 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2126 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2127 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2128 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2129 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2130 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2131 Assert(uSelCS != 0);
2132 Assert(uSelTR != 0);
2133
2134 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2135#if 0
2136 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE))
2137 Assert(uSelSS != 0);
2138#endif
2139
2140 /* Write these host selector fields into the host-state area in the VMCS. */
2141 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
2142 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
2143 /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static) */
2144#if 0
2145 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
2146 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
2147 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
2148 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
2149#endif
2150 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
2151 AssertRCReturn(rc, rc);
2152
2153 /*
2154 * Host GDTR and IDTR.
2155 */
2156 /** @todo Despite VT-x -not- restoring the GDTR and IDTR limits, it should
2157 * be safe to -not- save and restore GDTR and IDTR in the assembly
2158 * code and just do it here, not caring if the limits are zapped on
2159 * VM-exit. */
2160 RTGDTR Gdtr;
2161 RT_ZERO(Gdtr);
2162#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2163 if (HMVMX_IS_64BIT_HOST_MODE())
2164 {
2165 X86XDTR64 Gdtr64;
2166 X86XDTR64 Idtr64;
2167 hmR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2168 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);
2169 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);
2170 Gdtr.cbGdt = Gdtr64.cb;
2171 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2172 }
2173 else
2174#endif
2175 {
2176 RTIDTR Idtr;
2177 ASMGetGDTR(&Gdtr);
2178 ASMGetIDTR(&Idtr);
2179 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
2180 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
2181 }
2182 AssertRCReturn(rc, rc);
2183
2184 /*
2185 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
2186 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be 0 too in most cases.
2187 */
2188 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2189 {
2190 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit.TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2191 return VERR_VMX_INVALID_HOST_STATE;
2192 }
2193
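/* The TR base cannot be read with an instruction (STR only returns the selector); fetch it from the TSS
   descriptor in the host GDT instead. */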
2194 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2195#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2196 if (HMVMX_IS_64BIT_HOST_MODE())
2197 {
2198 /* We need the 64-bit TR base for hybrid darwin. */
2199 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2200 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2201 }
2202 else
2203#endif
2204 {
2205 uintptr_t uTRBase;
2206#if HC_ARCH_BITS == 64
2207 uTRBase = X86DESC64_BASE(pDesc);
2208#else
2209 uTRBase = X86DESC_BASE(pDesc);
2210#endif
2211 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2212 }
2213 AssertRCReturn(rc, rc);
2214
2215 /*
2216 * Host FS base and GS base.
2217 * For 32-bit hosts the assembly code takes care of the bases by pushing/popping FS and GS.
2218 * On 64-bit hosts, the FS.base and GS.base MSRs come into play.
2219 */
2220#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2221 if (HMVMX_IS_64BIT_HOST_MODE())
2222 {
2223 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2224 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2225 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_FS_BASE, u64FSBase);
2226 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_GS_BASE, u64GSBase);
2227 AssertRCReturn(rc, rc);
2228 }
2229#endif
2230 return rc;
2231}
2232
2233
2234/**
2235 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2236 * host-state area of the VMCS. These MSRs will be automatically restored on
2237 * the host after every successful VM-exit.
2238 *
2239 * @returns VBox status code.
2240 * @param pVM Pointer to the VM.
2241 * @param pVCpu Pointer to the VMCPU.
2242 */
2243DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2244{
2245 AssertPtr(pVCpu);
2246 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2247
2248 int rc = VINF_SUCCESS;
2249#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2250 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2251 uint32_t cHostMsrs = 0;
2252 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2253
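/* Presumably we only bother saving host EFER when the host can actually have it set (NX or long mode);
   CPUs lacking both features may not implement the MSR at all. */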
2254 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2255 {
2256 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2257 pHostMsr->u32Reserved = 0;
2258# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2259 if (CPUMIsGuestInLongMode(pVCpu))
2260 {
2261 /* Must match the EFER value in our 64-bit switcher. */
2262 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2263 }
2264 else
2265# endif
2266 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
2267 pHostMsr++; cHostMsrs++;
2268 }
2269
2270# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2271 if (HMVMX_IS_64BIT_HOST_MODE())
2272 {
2273 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2274 pHostMsr->u32Reserved = 0;
2275 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2276 pHostMsr++; cHostMsrs++;
2277 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2278 pHostMsr->u32Reserved = 0;
2279 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2280 pHostMsr++; cHostMsrs++;
2281 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2282 pHostMsr->u32Reserved = 0;
2283 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2284 pHostMsr++; cHostMsrs++;
2285 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2286 pHostMsr->u32Reserved = 0;
2287 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2288 pHostMsr++; cHostMsrs++;
2289 }
2290# endif
2291
2292 /* Shouldn't ever happen, but there -is- a limit. We're well within the recommended maximum of 512. */
2293 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2294 {
2295 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2296 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2297 }
2298
2299 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2300#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2301
2302 /*
2303 * Host Sysenter MSRs.
2304 */
2305 rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2306# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2307 if (HMVMX_IS_64BIT_HOST_MODE())
2308 {
2309 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2310 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2311 }
2312 else
2313 {
2314 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2315 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2316 }
2317# elif HC_ARCH_BITS == 32
2318 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2319 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2320# else
2321 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2322 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2323# endif
2324 AssertRCReturn(rc, rc);
2325
2326 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2327 * hmR0VmxSetupExitCtls() !! */
2328 return rc;
2329}
2330
2331
2332/**
2333 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2334 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2335 * controls".
2336 *
2337 * @returns VBox status code.
2338 * @param pVCpu Pointer to the VMCPU.
2339 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2340 * out-of-sync. Make sure to update the required fields
2341 * before using them.
2342 *
2343 * @remarks No-long-jump zone!!!
2344 */
2345DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2346{
2347 int rc = VINF_SUCCESS;
2348 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2349 {
2350 PVM pVM = pVCpu->CTX_SUFF(pVM);
2351 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2352 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2353
2354 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
2355 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
2356
2357 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2358 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2359 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
2360 else
2361 Assert(!(val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST));
2362
2363 /*
2364 * The following should not be set (since we're not in SMM mode):
2365 * - VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM
2366 * - VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON
2367 */
2368
2369 /** @todo VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR,
2370 * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR,
2371 * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR */
2372
2373 if ((val & zap) != val)
2374 {
2375 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2376 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2377 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2378 }
2379
2380 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);
2381 AssertRCReturn(rc, rc);
2382
2383 /* Update VCPU with the currently set VM-entry controls. */
2384 pVCpu->hm.s.vmx.u32EntryCtls = val;
2385 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2386 }
2387 return rc;
2388}
2389
2390
2391/**
2392 * Sets up the VM-exit controls in the VMCS.
2393 *
2394 * @returns VBox status code.
2396 * @param pVCpu Pointer to the VMCPU.
2397 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2398 * out-of-sync. Make sure to update the required fields
2399 * before using them.
2400 *
2401 * @remarks requires EFER.
2402 */
2403DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2404{
2405 int rc = VINF_SUCCESS;
2406 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2407 {
2408 PVM pVM = pVCpu->CTX_SUFF(pVM);
2409 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2410 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2411
2412 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2413 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
2414
2415 /* Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. */
2416#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2417 if (HMVMX_IS_64BIT_HOST_MODE())
2418 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;
2419 else
2420 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
2421#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2422 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2423 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2424 else
2425 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
2426#endif
2427
2428 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2429 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT));
2430
2431 /** @todo VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR,
2432 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR,
2433 * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR,
2434 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR,
2435 * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR. */
2436
2437 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
2438 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER;
2439
2440 if ((val & zap) != val)
2441 {
2442 LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2443 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2444 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2445 }
2446
2447 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, val);
2448 AssertRCReturn(rc, rc);
2449
2450 /* Update VCPU with the currently set VM-exit controls. */
2451 pVCpu->hm.s.vmx.u32ExitCtls = val;
2452 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2453 }
2454 return rc;
2455}
2456
2457
2458/**
2459 * Loads the guest APIC and related state.
2460 *
2461 * @returns VBox status code.
2463 * @param pVCpu Pointer to the VMCPU.
2464 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2465 * out-of-sync. Make sure to update the required fields
2466 * before using them.
2467 */
2468DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2469{
2470 int rc = VINF_SUCCESS;
2471 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2472 {
2473 /* Set up TPR shadowing. Also set up TPR patching for 32-bit guests. */
2474 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
2475 {
2476 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2477
2478 bool fPendingIntr = false;
2479 uint8_t u8GuestTpr = 0;
2480 rc = PDMApicGetTPR(pVCpu, &u8GuestTpr, &fPendingIntr);
2481 AssertRCReturn(rc, rc);
2482
2483 /*
2484 * If there are external interrupts pending but masked by the TPR value, apply the threshold so that if the guest
2485 * lowers the TPR, it would cause a VM-exit and we can deliver the interrupt.
2486 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2487 * the interrupt when we VM-exit for other reasons.
2488 */
2489 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2490 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2491 uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
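/* Example: a guest TPR of 0x50 with a pending interrupt yields a threshold of 5; once the guest lowers its TPR
   below 0x50, the TPR write causes a VM-exit and we can deliver the pending interrupt. */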
2492 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2493
2494 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2495 AssertRCReturn(rc, rc);
2496
2497 /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
2498 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
2499 {
2500 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* EFER always up-to-date. */
2501 pMixedCtx->msrLSTAR = u8GuestTpr;
2502 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
2503 {
2504 /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
2505 if (fPendingIntr)
2506 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
2507 else
2508 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2509 }
2510 }
2511 }
2512
2513 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2514 }
2515 return rc;
2516}
2517
2518
2519/**
2520 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2521 *
2522 * @returns The guest's interruptibility-state.
2523 * @param pVCpu Pointer to the VMCPU.
2524 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2525 * out-of-sync. Make sure to update the required fields
2526 * before using them.
2527 *
2528 * @remarks No-long-jump zone!!!
2529 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2530 */
2531DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2532{
2533 /*
2534 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2535 * inhibit interrupts or clear any existing interrupt-inhibition.
2536 */
2537 uint32_t uIntrState = 0;
2538 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2539 {
2540 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2541 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2542 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2543 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2544 {
2545 /*
2546 * We can clear the inhibit force-flag: even if we go back to the recompiler without executing guest code in
2547 * VT-x, the condition for clearing the flag has been met and thus the cleared state is correct.
2548 */
2549 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2550 }
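/* Only one inhibition is tracked, so infer its type: an inhibiting STI necessarily leaves IF set, while with
   IF clear the inhibition can only stem from MOV SS/POP SS; with IF set we assume STI. */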
2551 else if (pMixedCtx->eflags.Bits.u1IF)
2552 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2553 else
2554 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2555 }
2556 return uIntrState;
2557}
2558
2559
2560/**
2561 * Loads the guest's interruptibility-state into the guest-state area in the
2562 * VMCS.
2563 *
2564 * @returns VBox status code.
2565 * @param pVCpu Pointer to the VMCPU.
2566 * @param uIntrState The interruptibility-state to set.
2567 */
2568static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2569{
2570 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2571 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2572 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2573 AssertRCReturn(rc, rc);
2574 return rc;
2575}
2576
2577
2578/**
2579 * Loads the guest's RIP into the guest-state area in the VMCS.
2580 *
2581 * @returns VBox status code.
2582 * @param pVCpu Pointer to the VMCPU.
2583 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2584 * out-of-sync. Make sure to update the required fields
2585 * before using them.
2586 *
2587 * @remarks No-long-jump zone!!!
2588 */
2589static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2590{
2591 int rc = VINF_SUCCESS;
2592 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2593 {
2594 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2595 AssertRCReturn(rc, rc);
2596 Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2597 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2598 }
2599 return rc;
2600}
2601
2602
2603/**
2604 * Loads the guest's RSP into the guest-state area in the VMCS.
2605 *
2606 * @returns VBox status code.
2607 * @param pVCpu Pointer to the VMCPU.
2608 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2609 * out-of-sync. Make sure to update the required fields
2610 * before using them.
2611 *
2612 * @remarks No-long-jump zone!!!
2613 */
2614static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2615{
2616 int rc = VINF_SUCCESS;
2617 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2618 {
2619 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2620 AssertRCReturn(rc, rc);
2621 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2622 }
2623 return rc;
2624}
2625
2626
2627/**
2628 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2629 *
2630 * @returns VBox status code.
2631 * @param pVCpu Pointer to the VMCPU.
2632 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2633 * out-of-sync. Make sure to update the required fields
2634 * before using them.
2635 *
2636 * @remarks No-long-jump zone!!!
2637 */
2638static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2639{
2640 int rc = VINF_SUCCESS;
2641 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2642 {
2643 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32 bits of RFLAGS are reserved (MBZ).
2644 Let us assert it as such and use 32-bit VMWRITE. */
2645 Assert(!(pMixedCtx->rflags.u64 >> 32));
2646 X86EFLAGS uEFlags = pMixedCtx->eflags;
2647 uEFlags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2648 uEFlags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2649
2650 /*
2651 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2652 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2653 */
2654 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2655 {
2656 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2657 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2658 pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32; /* Save the original eflags of the real-mode guest. */
2659 uEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2660 uEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2661 }
2662
2663 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, uEFlags.u32);
2664 AssertRCReturn(rc, rc);
2665
2666 Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
2667 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2668 }
2669 return rc;
2670}
2671
2672
2673/**
2674 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2675 *
2676 * @returns VBox status code.
2677 * @param pVCpu Pointer to the VMCPU.
2678 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2679 * out-of-sync. Make sure to update the required fields
2680 * before using them.
2681 *
2682 * @remarks No-long-jump zone!!!
2683 */
2684static int hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2685{
2686 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2687 rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2688 rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2689 return rc;
2690}
2691
2692
2693/**
2694 * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
2695 * in the VMCS.
2696 *
2697 * @returns VBox status code.
2699 * @param pVCpu Pointer to the VMCPU.
2700 * @param pCtx Pointer to the guest-CPU context. The data may be
2701 * out-of-sync. Make sure to update the required fields
2702 * before using them.
2703 *
2704 * @remarks No-long-jump zone!!!
2705 */
2706static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
2707{
2708 int rc = VINF_SUCCESS;
2709 PVM pVM = pVCpu->CTX_SUFF(pVM);
2710
2711 /*
2712 * Guest CR0.
2713 * Guest FPU.
2714 */
2715 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2716 {
2717 Assert(!(pCtx->cr0 >> 32));
2718 uint32_t u32GuestCR0 = pCtx->cr0;
2719
2720 /* The guest's view (read access) of its CR0 is unblemished. */
2721 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2722 AssertRCReturn(rc, rc);
2723 Log(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2724
2725 /* Setup VT-x's view of the guest CR0. */
2726 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2727 if (pVM->hm.s.fNestedPaging)
2728 {
2729 if (CPUMIsGuestPagingEnabledEx(pCtx))
2730 {
2731 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
2732 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
2733 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
2734 }
2735 else
2736 {
2737 /* The guest doesn't have paging enabled; make CR3 accesses cause VM-exits so we can update our shadow page tables. */
2738 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
2739 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
2740 }
2741
2742 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2743 AssertRCReturn(rc, rc);
2744 }
2745 else
2746 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a VM-exit. */
2747
2748 /*
2749 * Guest FPU bits.
2750 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
2751 * CPUs to support VT-x, with no mention made of UX in the VM-entry checks.
2752 */
2753 u32GuestCR0 |= X86_CR0_NE;
2754 bool fInterceptNM = false;
2755 if (CPUMIsGuestFPUStateActive(pVCpu))
2756 {
2757 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
2758 /* The guest should still get #NM exceptions when it expects them, so we should not clear TS & MP bits here.
2759 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
2760 }
2761 else
2762 {
2763 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
2764 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
2765 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
2766 }
2767
2768 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
2769 bool fInterceptMF = false;
2770 if (!(pCtx->cr0 & X86_CR0_NE))
2771 fInterceptMF = true;
2772
2773 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
2774 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2775 {
2776 Assert(PDMVmmDevHeapIsEnabled(pVM));
2777 Assert(pVM->hm.s.vmx.pRealModeTSS);
2778 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2779 fInterceptNM = true;
2780 fInterceptMF = true;
2781 }
2782 else
2783 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2784
2785 if (fInterceptNM)
2786 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
2787 else
2788 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
2789
2790 if (fInterceptMF)
2791 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
2792 else
2793 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
2794
2795 /* Additional intercepts for debugging, define these yourself explicitly. */
2796#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2797 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_BP)
2798 | RT_BIT(X86_XCPT_DB)
2799 | RT_BIT(X86_XCPT_DE)
2800 | RT_BIT(X86_XCPT_NM)
2801 | RT_BIT(X86_XCPT_UD)
2802 | RT_BIT(X86_XCPT_NP)
2803 | RT_BIT(X86_XCPT_SS)
2804 | RT_BIT(X86_XCPT_GP)
2805 | RT_BIT(X86_XCPT_PF)
2806 | RT_BIT(X86_XCPT_MF);
2807#elif defined(HMVMX_ALWAYS_TRAP_PF)
2808 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2809#endif
2810
2811 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
2812
2813 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
2814 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2815 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
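/* VMX fixed-bit convention: a CR0 bit must be 1 if it is 1 in vmx_cr0_fixed0 and must be 0 if it is 0 in
   vmx_cr0_fixed1. Thus fixed0 & fixed1 gives the bits to force set, and fixed0 | fixed1 the bits allowed to be 1. */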
2816 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
2817 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
2818 else
2819 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2820
2821 u32GuestCR0 |= uSetCR0;
2822 u32GuestCR0 &= uZapCR0;
2823 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
2824
2825 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
2826 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
2827 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
2828 Log(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
2829
2830 /*
2831 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
2832 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
2833 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
2834 */
2835 uint64_t u64CR0Mask = 0;
2836 u64CR0Mask = X86_CR0_PE
2837 | X86_CR0_NE
2838 | X86_CR0_WP
2839 | X86_CR0_PG
2840 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
2841 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
2842 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
2843 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2844 u64CR0Mask &= ~X86_CR0_PE;
2845 if (pVM->hm.s.fNestedPaging)
2846 u64CR0Mask &= ~X86_CR0_WP;
2847
2848 /* If the guest FPU state is active, we don't need to VM-exit on writes to FPU-related bits in CR0. */
2849 if (fInterceptNM)
2850 u64CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
2851 else
2852 u64CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
2853
2854 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
2855 pVCpu->hm.s.vmx.cr0_mask = u64CR0Mask;
2856 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64CR0Mask);
2857 AssertRCReturn(rc, rc);
2858
2859 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
2860 }
2861
2862 /*
2863 * Guest CR2.
2864 * It's always loaded in the assembler code. Nothing to do here.
2865 */
2866
2867 /*
2868 * Guest CR3.
2869 */
2870 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
2871 {
2872 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
2873 if (pVM->hm.s.fNestedPaging)
2874 {
2875 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2876
2877 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2878 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
2879 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2880 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
2881
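            /* EPTP layout used below (Intel spec. 24.6.11): bits 2:0 hold the EPT paging-structure memory type
               (6 = write-back), bits 5:3 hold the page-walk length minus 1, bits 11:6 are not used here (asserted
               zero below) and bits 63:12 hold the physical address of the EPT PML4 table. */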
2882 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
2883 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
2884 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
2885
2886 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2887 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2888 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
2889 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
2890
2891 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
2892 AssertRCReturn(rc, rc);
2893 Log(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
2894
2895 if ( pVM->hm.s.vmx.fUnrestrictedGuest
2896 || CPUMIsGuestPagingEnabledEx(pCtx))
2897 {
2898 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
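                /* (With EPT active and the guest using PAE paging, the CPU loads the guest PDPTEs from these four
                   VMCS fields on VM-entry rather than from guest memory.) */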
2899 if (CPUMIsGuestInPAEModeEx(pCtx))
2900 {
2901 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
2902 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
2903 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
2904 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
2905 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
2906 AssertRCReturn(rc, rc);
2907 }
2908
2909            /* With Nested Paging the guest's view of its CR3 is unblemished while the guest is using paging, or while
2910               Unrestricted Execution handles the guest when it's not using paging. */
2911 GCPhysGuestCR3 = pCtx->cr3;
2912 }
2913 else
2914 {
2915 /*
2916 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
2917 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
2918 * EPT takes care of translating it to host-physical addresses.
2919 */
2920 RTGCPHYS GCPhys;
2921 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2922 Assert(PDMVmmDevHeapIsEnabled(pVM));
2923
2924 /* We obtain it here every time as the guest could have relocated this PCI region. */
2925 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2926 AssertRCReturn(rc, rc);
2927
2928 GCPhysGuestCR3 = GCPhys;
2929 }
2930 }
2931 else
2932 {
2933 /* Non-nested paging case, just use the hypervisor's CR3. */
2934 GCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
2935 }
2936
2937 Log(("Load: VMX_VMCS_GUEST_CR3=%#RGv\n", GCPhysGuestCR3));
2938 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
2939 AssertRCReturn(rc, rc);
2940
2941 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
2942 }
2943
2944 /*
2945 * Guest CR4.
2946 */
2947 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
2948 {
2949 Assert(!(pCtx->cr4 >> 32));
2950 uint32_t u32GuestCR4 = pCtx->cr4;
2951
2952 /* The guest's view of its CR4 is unblemished. */
2953 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
2954 AssertRCReturn(rc, rc);
2955 Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
2956
2957 /* Setup VT-x's view of the guest CR4. */
2958 /*
2959 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
2960 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2961 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2962 */
2963 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2964 {
2965 Assert(pVM->hm.s.vmx.pRealModeTSS);
2966 Assert(PDMVmmDevHeapIsEnabled(pVM));
2967 u32GuestCR4 &= ~X86_CR4_VME;
2968 }
2969
2970 if (pVM->hm.s.fNestedPaging)
2971 {
2972 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2973 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2974 {
2975 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2976 u32GuestCR4 |= X86_CR4_PSE;
2977                /* Our identity mapping is a 32-bit page directory. */
2978 u32GuestCR4 &= ~X86_CR4_PAE;
2979 }
2980 /* else use guest CR4.*/
2981 }
2982 else
2983 {
2984 /*
2985             * The shadow paging mode and the guest paging mode differ; the shadow mode follows the host paging mode,
2986             * so we need to adjust VT-x's view of CR4 according to our shadow page tables.
2987 */
2988 switch (pVCpu->hm.s.enmShadowMode)
2989 {
2990 case PGMMODE_REAL: /* Real-mode. */
2991 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2992 case PGMMODE_32_BIT: /* 32-bit paging. */
2993 {
2994 u32GuestCR4 &= ~X86_CR4_PAE;
2995 break;
2996 }
2997
2998 case PGMMODE_PAE: /* PAE paging. */
2999 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3000 {
3001 u32GuestCR4 |= X86_CR4_PAE;
3002 break;
3003 }
3004
3005 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3006 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3007#ifdef VBOX_ENABLE_64_BITS_GUESTS
3008 break;
3009#endif
3010 default:
3011 AssertFailed();
3012 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3013 }
3014 }
3015
3016 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
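        /* Same fixed-bit convention as for CR0 above, this time using IA32_VMX_CR4_FIXED0/IA32_VMX_CR4_FIXED1
           (Intel spec. A.8 "VMX-Fixed Bits in CR4"). */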
3017 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3018 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3019 u32GuestCR4 |= uSetCR4;
3020 u32GuestCR4 &= uZapCR4;
3021
3022 /* Write VT-x's view of the guest CR4 into the VMCS. */
3023 Log(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3024 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3025
3026        /* Set up the CR4 mask. These are the CR4 bits owned by the host; if the guest attempts to change them, a VM-exit occurs. */
3027 uint64_t u64CR4Mask = 0;
3028 u64CR4Mask = X86_CR4_VME
3029 | X86_CR4_PAE
3030 | X86_CR4_PGE
3031 | X86_CR4_PSE
3032 | X86_CR4_VMXE;
3033 pVCpu->hm.s.vmx.cr4_mask = u64CR4Mask;
3034 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64CR4Mask);
3035 AssertRCReturn(rc, rc);
3036
3037 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3038 }
3039 return rc;
3040}
3041
3042
3043/**
3044 * Loads the guest debug registers into the guest-state area in the VMCS.
3045 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
3046 *
3047 * @returns VBox status code.
3048 * @param pVCpu Pointer to the VMCPU.
3049 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3050 * out-of-sync. Make sure to update the required fields
3051 * before using them.
3052 *
3053 * @remarks No-long-jump zone!!!
3054 */
3055static int hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3056{
3057 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3058 return VINF_SUCCESS;
3059
3060#ifdef VBOX_STRICT
3061 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3062 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
3063 {
3064 Assert(!(pMixedCtx->dr[7] >> 32)); /* upper 32 bits are reserved (MBZ). */
3065 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3066 Assert((pMixedCtx->dr[7] & 0xd800) == 0); /* bits 15, 14, 12, 11 are reserved (MBZ). */
3067 Assert((pMixedCtx->dr[7] & 0x400) == 0x400); /* bit 10 is reserved (MB1). */
3068 }
3069#endif
3070
3071 int rc = VERR_INTERNAL_ERROR_5;
3072 PVM pVM = pVCpu->CTX_SUFF(pVM);
3073 bool fInterceptDB = false;
3074 bool fInterceptMovDRx = false;
3075 if (DBGFIsStepping(pVCpu))
3076 {
3077 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3078 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
3079 {
3080 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
3081 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
3082 AssertRCReturn(rc, rc);
3083 Assert(fInterceptDB == false);
3084 }
3085 else
3086 fInterceptDB = true;
3087 }
3088
3089 if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3090 {
3091 if (!CPUMIsHyperDebugStateActive(pVCpu))
3092 {
3093 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3094 AssertRC(rc);
3095 }
3096 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3097 fInterceptMovDRx = true;
3098 }
3099 else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3100 {
3101 if (!CPUMIsGuestDebugStateActive(pVCpu))
3102 {
3103 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3104 AssertRC(rc);
3105 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3106 }
3107 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3108 Assert(fInterceptMovDRx == false);
3109 }
3110 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3111 {
3112        /* The guest debug registers aren't loaded and no debug breakpoints are armed; intercept MOV DRx accesses so we can react to the guest's first access. */
3113 fInterceptMovDRx = true;
3114 }
3115
3116 /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
3117 if (fInterceptDB)
3118 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3119 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3120 {
3121#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3122 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3123#endif
3124 }
3125
3126 /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
3127 if (fInterceptMovDRx)
3128 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
3129 else
3130 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
3131
3132 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3133 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
3134
3135 /* The guest's view of its DR7 is unblemished. */
3136 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
3137
3138 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3139 return rc;
3140}
3141
3142
3143#ifdef VBOX_STRICT
3144/**
3145 * Strict function to validate segment registers.
3146 *
3147 * @remarks Requires CR0.
3148 */
3149static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3150{
3151 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3152 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
3153 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
3154 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3155 && ( !CPUMIsGuestInRealModeEx(pCtx)
3156 && !CPUMIsGuestInV86ModeEx(pCtx)))
3157 {
3158 /* Protected mode checks */
3159 /* CS */
3160 Assert(pCtx->cs.Attr.n.u1Present);
3161 Assert(!(pCtx->cs.Attr.u & 0xf00));
3162 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3163 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3164 || !(pCtx->cs.Attr.n.u1Granularity));
3165 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3166 || (pCtx->cs.Attr.n.u1Granularity));
3167 Assert(pCtx->cs.Attr.u && pCtx->cs.Attr.u != HMVMX_SEL_UNUSABLE); /* CS cannot be loaded with NULL in protected mode. */
3168 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3169 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3170 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3171 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3172 else
3173            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3174 /* SS */
3175 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3176 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3177 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
3178 if ( !(pCtx->cr0 & X86_CR0_PE)
3179 || pCtx->cs.Attr.n.u4Type == 3)
3180 {
3181 Assert(!pCtx->ss.Attr.n.u2Dpl);
3182 }
3183 if (pCtx->ss.Attr.u && pCtx->ss.Attr.u != HMVMX_SEL_UNUSABLE)
3184 {
3185 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3186 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3187 Assert(pCtx->ss.Attr.n.u1Present);
3188 Assert(!(pCtx->ss.Attr.u & 0xf00));
3189 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3190 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3191 || !(pCtx->ss.Attr.n.u1Granularity));
3192 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3193 || (pCtx->ss.Attr.n.u1Granularity));
3194 }
3195 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3196 if (pCtx->ds.Attr.u && pCtx->ds.Attr.u != HMVMX_SEL_UNUSABLE)
3197 {
3198 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3199 Assert(pCtx->ds.Attr.n.u1Present);
3200 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3201 Assert(!(pCtx->ds.Attr.u & 0xf00));
3202 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3203 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3204 || !(pCtx->ds.Attr.n.u1Granularity));
3205 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3206 || (pCtx->ds.Attr.n.u1Granularity));
3207 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3208 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3209 }
3210 if (pCtx->es.Attr.u && pCtx->es.Attr.u != HMVMX_SEL_UNUSABLE)
3211 {
3212 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3213 Assert(pCtx->es.Attr.n.u1Present);
3214 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3215 Assert(!(pCtx->es.Attr.u & 0xf00));
3216 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3217 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3218 || !(pCtx->es.Attr.n.u1Granularity));
3219 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3220 || (pCtx->es.Attr.n.u1Granularity));
3221 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3222 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3223 }
3224 if (pCtx->fs.Attr.u && pCtx->fs.Attr.u != HMVMX_SEL_UNUSABLE)
3225 {
3226 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3227 Assert(pCtx->fs.Attr.n.u1Present);
3228 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3229 Assert(!(pCtx->fs.Attr.u & 0xf00));
3230 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3231 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3232 || !(pCtx->fs.Attr.n.u1Granularity));
3233 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3234 || (pCtx->fs.Attr.n.u1Granularity));
3235 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3236 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3237 }
3238 if (pCtx->gs.Attr.u && pCtx->gs.Attr.u != HMVMX_SEL_UNUSABLE)
3239 {
3240 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3241 Assert(pCtx->gs.Attr.n.u1Present);
3242 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3243 Assert(!(pCtx->gs.Attr.u & 0xf00));
3244 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3245 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3246 || !(pCtx->gs.Attr.n.u1Granularity));
3247 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3248 || (pCtx->gs.Attr.n.u1Granularity));
3249 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3250 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3251 }
3252 /* 64-bit capable CPUs. */
3253# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3254 Assert(!(pCtx->cs.u64Base >> 32));
3255 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3256 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3257 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3258# endif
3259 }
3260 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3261 || ( CPUMIsGuestInRealModeEx(pCtx)
3262 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3263 {
3264 /* Real and v86 mode checks. */
3265        /* hmR0VmxWriteSegmentReg() may override the attributes it writes into the VMCS; here we want to check what is actually fed to VT-x. */
3266 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3267 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3268 {
3269 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3270 }
3271 else
3272 {
3273 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3274 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3275 }
3276
3277 /* CS */
3278        AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3279 Assert(pCtx->cs.u32Limit == 0xffff);
3280 Assert(u32CSAttr == 0xf3);
3281 /* SS */
3282 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3283 Assert(pCtx->ss.u32Limit == 0xffff);
3284 Assert(u32SSAttr == 0xf3);
3285 /* DS */
3286 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3287 Assert(pCtx->ds.u32Limit == 0xffff);
3288 Assert(u32DSAttr == 0xf3);
3289 /* ES */
3290 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3291 Assert(pCtx->es.u32Limit == 0xffff);
3292 Assert(u32ESAttr == 0xf3);
3293 /* FS */
3294 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3295 Assert(pCtx->fs.u32Limit == 0xffff);
3296 Assert(u32FSAttr == 0xf3);
3297 /* GS */
3298 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3299 Assert(pCtx->gs.u32Limit == 0xffff);
3300 Assert(u32GSAttr == 0xf3);
3301 /* 64-bit capable CPUs. */
3302# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3303 Assert(!(pCtx->cs.u64Base >> 32));
3304 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3305 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3306 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3307# endif
3308 }
3309}
3310#endif /* VBOX_STRICT */
3311
3312
3313/**
3314 * Writes a guest segment register into the guest-state area in the VMCS.
3315 *
3316 * @returns VBox status code.
3317 * @param pVCpu Pointer to the VMCPU.
3318 * @param idxSel Index of the selector in the VMCS.
3319 * @param idxLimit Index of the segment limit in the VMCS.
3320 * @param idxBase Index of the segment base in the VMCS.
3321 * @param idxAccess Index of the access rights of the segment in the VMCS.
3322 * @param pSelReg Pointer to the segment selector.
3323 * @param pCtx Pointer to the guest-CPU context.
3324 *
3325 * @remarks No-long-jump zone!!!
3326 */
3327static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3328 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3329{
3330 int rc;
3331 rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3332 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3333 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3334 AssertRCReturn(rc, rc);
3335
3336 uint32_t u32Access = pSelReg->Attr.u;
3337 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3338 {
3339 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
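        /* (0xf3 decodes to: type=3 read/write accessed data segment, S=1 code/data, DPL=3, present=1 -- the access
           rights VT-x requires for all segments while the guest is in virtual-8086 mode.) */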
3340 u32Access = 0xf3;
3341 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3342 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3343 }
3344 else
3345 {
3346 /*
3347         * The way to tell whether this is really a null selector, rather than a selector simply loaded with 0 in
3348         * real-mode, is the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
3349         * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler and VT-x ensure that
3350         * NULL selectors loaded in protected-mode have their attributes set to 0.
3351 */
3352 if (!u32Access)
3353 u32Access = HMVMX_SEL_UNUSABLE;
3354 }
3355
3356 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3357 AssertMsg((u32Access == HMVMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3358              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
3359
3360 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3361 AssertRCReturn(rc, rc);
3362 return rc;
3363}
3364
3365
3366/**
3367 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3368 * into the guest-state area in the VMCS.
3369 *
3370 * @returns VBox status code.
3371 * @param   pVCpu       Pointer to the VMCPU.
3373 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3374 * out-of-sync. Make sure to update the required fields
3375 * before using them.
3376 *
3377 * @remarks Requires CR0 (strict builds validation).
3378 * @remarks No-long-jump zone!!!
3379 */
3380static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3381{
3382 int rc = VERR_INTERNAL_ERROR_5;
3383 PVM pVM = pVCpu->CTX_SUFF(pVM);
3384
3385 /*
3386 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3387 */
3388 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3389 {
3390 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3391 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3392 {
3393 pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
3394 pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
3395 pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
3396 pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
3397 pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
3398 pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
3399 }
3400
3401#ifdef VBOX_WITH_REM
3402 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3403 {
3404 Assert(pVM->hm.s.vmx.pRealModeTSS);
3405 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3406 if ( pVCpu->hm.s.vmx.fWasInRealMode
3407 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3408 {
3409 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3410 in real-mode (e.g. OpenBSD 4.0) */
3411 REMFlushTBs(pVM);
3412 Log(("Load: Switch to protected mode detected!\n"));
3413 pVCpu->hm.s.vmx.fWasInRealMode = false;
3414 }
3415 }
3416#endif
3417 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3418 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3419 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3420 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3421 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3422 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3423 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3424 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3425 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3426 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3427 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3428 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3429 AssertRCReturn(rc, rc);
3430
3431#ifdef VBOX_STRICT
3432 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3433#endif
3434 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3435 }
3436
3437 /*
3438 * Guest TR.
3439 */
3440 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3441 {
3442 /*
3443 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3444 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3445 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3446 */
3447 uint16_t u16Sel = 0;
3448 uint32_t u32Limit = 0;
3449 uint64_t u64Base = 0;
3450 uint32_t u32AccessRights = 0;
3451
3452 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3453 {
3454 u16Sel = pMixedCtx->tr.Sel;
3455 u32Limit = pMixedCtx->tr.u32Limit;
3456 u64Base = pMixedCtx->tr.u64Base;
3457 u32AccessRights = pMixedCtx->tr.Attr.u;
3458 }
3459 else
3460 {
3461 Assert(pVM->hm.s.vmx.pRealModeTSS);
3462 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3463
3464 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3465 RTGCPHYS GCPhys;
3466 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3467 AssertRCReturn(rc, rc);
3468
3469 X86DESCATTR DescAttr;
3470 DescAttr.u = 0;
3471 DescAttr.n.u1Present = 1;
3472 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3473
3474 u16Sel = 0;
3475 u32Limit = HM_VTX_TSS_SIZE;
3476 u64Base = GCPhys; /* in real-mode phys = virt. */
3477 u32AccessRights = DescAttr.u;
3478 }
3479
3480 /* Validate. */
3481 Assert(!(u16Sel & RT_BIT(2)));
3482 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3483 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3484 AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3485 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3486 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3487 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3488 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3489 Assert( (u32Limit & 0xfff) == 0xfff
3490 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3491 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3492 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3493
3494 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);
3495 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
3496 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
3497 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
3498 AssertRCReturn(rc, rc);
3499
3500 Log(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3501 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3502 }
3503
3504 /*
3505 * Guest GDTR.
3506 */
3507 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3508 {
3509 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
3510 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
3511 AssertRCReturn(rc, rc);
3512
3513 Assert(!(pMixedCtx->gdtr.cbGdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3514 Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3515 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3516 }
3517
3518 /*
3519 * Guest LDTR.
3520 */
3521 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3522 {
3523        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
3524 uint32_t u32Access = 0;
3525 if (!pMixedCtx->ldtr.Attr.u)
3526 u32Access = HMVMX_SEL_UNUSABLE;
3527 else
3528 u32Access = pMixedCtx->ldtr.Attr.u;
3529
3530 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);
3531 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
3532 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
3533 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
3534 AssertRCReturn(rc, rc);
3535
3536 /* Validate. */
3537 if (!(u32Access & HMVMX_SEL_UNUSABLE))
3538 {
3539 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3540 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3541 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3542 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3543 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3544 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3545 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3546 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3547 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3548 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3549 }
3550
3551 Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3552 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3553 }
3554
3555 /*
3556 * Guest IDTR.
3557 */
3558 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3559 {
3560 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
3561 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
3562 AssertRCReturn(rc, rc);
3563
3564 Assert(!(pMixedCtx->idtr.cbIdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3565 Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3566 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3567 }
3568
3569 return VINF_SUCCESS;
3570}
3571
3572
3573/**
3574 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3575 * areas. These MSRs will automatically be loaded to the host CPU on every
3576 * successful VM entry and stored from the host CPU on every successful VM exit.
3577 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3578 *
3579 * @returns VBox status code.
3580 * @param pVCpu Pointer to the VMCPU.
3581 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3582 * out-of-sync. Make sure to update the required fields
3583 * before using them.
3584 *
3585 * @remarks No-long-jump zone!!!
3586 */
3587static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3588{
3589 AssertPtr(pVCpu);
3590 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3591
3592 /*
3593     * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, KERNEL_GS_BASE and TSC_AUX (RDTSCP).
3594 */
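    /* Layout note: each entry in the auto-load/store area is a {MSR index, reserved, value} record (see the VMXMSR
       accesses below); the CPU loads the listed MSRs on every successful VM-entry and stores the guest values back on
       every VM-exit (cf. Intel spec. 26.4 "Loading MSRs"). */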
3595 int rc = VINF_SUCCESS;
3596 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3597 {
3598#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3599 PVM pVM = pVCpu->CTX_SUFF(pVM);
3600 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3601 uint32_t cGuestMsrs = 0;
3602
3603 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3604 const bool fSupportsNX = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3605 const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3606 if (fSupportsNX || fSupportsLongMode)
3607 {
3608 /** @todo support save IA32_EFER, i.e.
3609 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR, in which case the
3610 * guest EFER need not be part of the VM-entry MSR-load area. */
3611 pGuestMsr->u32IndexMSR = MSR_K6_EFER;
3612 pGuestMsr->u32Reserved = 0;
3613 pGuestMsr->u64Value = pMixedCtx->msrEFER;
3614 /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
3615 if (!CPUMIsGuestInLongModeEx(pMixedCtx))
3616 pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
3617 pGuestMsr++; cGuestMsrs++;
3618 if (fSupportsLongMode)
3619 {
3620 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3621 pGuestMsr->u32Reserved = 0;
3622 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */
3623 pGuestMsr++; cGuestMsrs++;
3624 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3625 pGuestMsr->u32Reserved = 0;
3626 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3627 pGuestMsr++; cGuestMsrs++;
3628 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3629 pGuestMsr->u32Reserved = 0;
3630 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3631 pGuestMsr++; cGuestMsrs++;
3632 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3633 pGuestMsr->u32Reserved = 0;
3634 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3635 pGuestMsr++; cGuestMsrs++;
3636 }
3637 }
3638
3639 /*
3640 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3641 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3642 */
3643 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3644 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3645 {
3646 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3647 pGuestMsr->u32Reserved = 0;
3648 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3649 AssertRCReturn(rc, rc);
3650 pGuestMsr++; cGuestMsrs++;
3651 }
3652
3653 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
3654 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3655 {
3656 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3657 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3658 }
3659
3660 /* Update the VCPU's copy of the guest MSR count. */
3661 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3662 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);
3663 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
3664 AssertRCReturn(rc, rc);
3665#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3666
3667 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3668 }
3669
3670 /*
3671 * Guest Sysenter MSRs.
3672 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
3673 * VM-exits on WRMSRs for these MSRs.
3674 */
3675 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
3676 {
3677 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
3678 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
3679 }
3680 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
3681 {
3682 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
3683 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
3684 }
3685 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
3686 {
3687 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
3688 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
3689 }
3690 AssertRCReturn(rc, rc);
3691
3692 return rc;
3693}
3694
3695
3696/**
3697 * Loads the guest activity state into the guest-state area in the VMCS.
3698 *
3699 * @returns VBox status code.
3700 * @param pVCpu Pointer to the VMCPU.
3701 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3702 * out-of-sync. Make sure to update the required fields
3703 * before using them.
3704 *
3705 * @remarks No-long-jump zone!!!
3706 */
3707static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3708{
3709 /** @todo See if we can make use of other states, e.g.
3710 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
3711 int rc = VINF_SUCCESS;
3712 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
3713 {
3714 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3715 AssertRCReturn(rc, rc);
3716 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
3717 }
3718 return rc;
3719}
3720
3721
3722/**
3723 * Sets up the appropriate function to run guest code.
3724 *
3725 * @returns VBox status code.
3726 * @param pVCpu Pointer to the VMCPU.
3727 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3728 * out-of-sync. Make sure to update the required fields
3729 * before using them.
3730 *
3731 * @remarks No-long-jump zone!!!
3732 */
3733static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3734{
3735 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3736 {
3737#ifndef VBOX_ENABLE_64_BITS_GUESTS
3738 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3739#endif
3740 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
3741#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3742 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
3743 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
3744#else
3745 /* 64-bit host or hybrid host. */
3746 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
3747#endif
3748 }
3749 else
3750 {
3751 /* Guest is not in long mode, use the 32-bit handler. */
3752 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
3753 }
3754 Assert(pVCpu->hm.s.vmx.pfnStartVM);
3755 return VINF_SUCCESS;
3756}
3757
3758
3759/**
3760 * Wrapper for running the guest code in VT-x.
3761 *
3762 * @returns VBox strict status code.
3763 * @param pVM Pointer to the VM.
3764 * @param pVCpu Pointer to the VMCPU.
3765 * @param pCtx Pointer to the guest-CPU context.
3766 *
3767 * @remarks No-long-jump zone!!!
3768 */
3769DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3770{
3771 /*
3772 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3773     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
3774     * Refer to the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
3775 */
3776#ifdef VBOX_WITH_KERNEL_USING_XMM
3777 return hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
3778#else
3779 return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
3780#endif
3781}
3782
3783
3784/**
3785 * Report world-switch error and dump some useful debug info.
3786 *
3787 * @param pVM Pointer to the VM.
3788 * @param pVCpu Pointer to the VMCPU.
3789 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
3790 * @param pCtx Pointer to the guest-CPU context.
3791 * @param pVmxTransient Pointer to the VMX transient structure (only
3792 * exitReason updated).
3793 */
3794static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
3795{
3796 Assert(pVM);
3797 Assert(pVCpu);
3798 Assert(pCtx);
3799 Assert(pVmxTransient);
3800 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3801
3802 Log(("VM-entry failure: %Rrc\n", rcVMRun));
3803 switch (rcVMRun)
3804 {
3805 case VERR_VMX_INVALID_VMXON_PTR:
3806 AssertFailed();
3807 break;
3808 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
3809 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
3810 {
3811 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
3812 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
3813 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
3814 AssertRC(rc);
3815
3816#ifdef VBOX_STRICT
3817 Log(("uExitReason %#x (VmxTransient %#x)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
3818 pVmxTransient->uExitReason));
3819 Log(("Exit Qualification %#x\n", pVmxTransient->uExitQualification));
3820 Log(("InstrError %#x\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
3821 if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
3822 Log(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
3823 else
3824 Log(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
3825
3826 /* VMX control bits. */
3827 uint32_t u32Val;
3828 uint64_t u64Val;
3829 HMVMXHCUINTREG uHCReg;
3830 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &u32Val); AssertRC(rc);
3831 Log(("VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS %#RX32\n", u32Val));
3832 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &u32Val); AssertRC(rc);
3833 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS %#RX32\n", u32Val));
3834 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, &u32Val); AssertRC(rc);
3835 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2 %#RX32\n", u32Val));
3836 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &u32Val); AssertRC(rc);
3837 Log(("VMX_VMCS32_CTRL_ENTRY_CONTROLS %#RX32\n", u32Val));
3838 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, &u32Val); AssertRC(rc);
3839 Log(("VMX_VMCS32_CTRL_EXIT_CONTROLS %#RX32\n", u32Val));
3840 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
3841 Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
3842 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
3843 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
3844 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
3845 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
3846 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
3847 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
3848 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
3849 Log(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
3850 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
3851 Log(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
3852 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3853 Log(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
3854 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3855 Log(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
3856 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
3857 Log(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
3858 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
3859 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
3860 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
3861 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
3862 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
3863 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
3864 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
3865            Log(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
3866 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
3867 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
3868 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
3869 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
3870 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
3871 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
3872
3873 /* Guest bits. */
3874 RTGCUINTREG uGCReg;
3875 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uGCReg); AssertRC(rc);
3876 Log(("Old Guest Rip %#RGv New %#RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)uGCReg));
3877 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uGCReg); AssertRC(rc);
3878 Log(("Old Guest Rsp %#RGv New %#RGv\n", (RTGCPTR)pCtx->rsp, (RTGCPTR)uGCReg));
3879 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
3880 Log(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
3881 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
3882 Log(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
3883
3884 /* Host bits. */
3885 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
3886 Log(("Host CR0 %#RHr\n", uHCReg));
3887 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
3888 Log(("Host CR3 %#RHr\n", uHCReg));
3889 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
3890 Log(("Host CR4 %#RHr\n", uHCReg));
3891
3892 RTGDTR HostGdtr;
3893 PCX86DESCHC pDesc;
3894 ASMGetGDTR(&HostGdtr);
3895 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val);
3896 Log(("Host CS %#08x\n", u32Val));
3897 if (u32Val < HostGdtr.cbGdt)
3898 {
3899 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3900 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
3901 }
3902
3903 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
3904 Log(("Host DS %#08x\n", u32Val));
3905 if (u32Val < HostGdtr.cbGdt)
3906 {
3907 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3908 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
3909 }
3910
3911 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
3912 Log(("Host ES %#08x\n", u32Val));
3913 if (u32Val < HostGdtr.cbGdt)
3914 {
3915 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3916 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
3917 }
3918
3919 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
3920 Log(("Host FS %#08x\n", u32Val));
3921 if (u32Val < HostGdtr.cbGdt)
3922 {
3923 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3924 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
3925 }
3926
3927 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
3928 Log(("Host GS %#08x\n", u32Val));
3929 if (u32Val < HostGdtr.cbGdt)
3930 {
3931 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3932 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
3933 }
3934
3935 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
3936 Log(("Host SS %#08x\n", u32Val));
3937 if (u32Val < HostGdtr.cbGdt)
3938 {
3939 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3940 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
3941 }
3942
3943 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
3944 Log(("Host TR %#08x\n", u32Val));
3945 if (u32Val < HostGdtr.cbGdt)
3946 {
3947 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3948 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
3949 }
3950
3951 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
3952 Log(("Host TR Base %#RHv\n", uHCReg));
3953 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
3954 Log(("Host GDTR Base %#RHv\n", uHCReg));
3955 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
3956 Log(("Host IDTR Base %#RHv\n", uHCReg));
3957 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
3958 Log(("Host SYSENTER CS %#08x\n", u32Val));
3959 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
3960 Log(("Host SYSENTER EIP %#RHv\n", uHCReg));
3961 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
3962 Log(("Host SYSENTER ESP %#RHv\n", uHCReg));
3963 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
3964 Log(("Host RSP %#RHv\n", uHCReg));
3965 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
3966 Log(("Host RIP %#RHv\n", uHCReg));
3967# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3968 if (HMVMX_IS_64BIT_HOST_MODE())
3969 {
3970 Log(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
3971 Log(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
3972 Log(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
3973 Log(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
3974 Log(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
3975 Log(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
3976 }
3977# endif
3978#endif /* VBOX_STRICT */
3979 break;
3980 }
3981
3982 default:
3983 /* Impossible */
3984 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
3985 break;
3986 }
3987 NOREF(pVM);
3988}
3989
3990
3991#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3992#ifndef VMX_USE_CACHED_VMCS_ACCESSES
3993# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
3994#endif
3995#ifdef VBOX_STRICT
3996static bool hmR0VmxIsValidWriteField(uint32_t idxField)
3997{
3998 switch (idxField)
3999 {
4000 case VMX_VMCS_GUEST_RIP:
4001 case VMX_VMCS_GUEST_RSP:
4002 case VMX_VMCS_GUEST_DR7:
4003 case VMX_VMCS_GUEST_SYSENTER_EIP:
4004 case VMX_VMCS_GUEST_SYSENTER_ESP:
4005 case VMX_VMCS_GUEST_GDTR_BASE:
4006 case VMX_VMCS_GUEST_IDTR_BASE:
4007 case VMX_VMCS_GUEST_CS_BASE:
4008 case VMX_VMCS_GUEST_DS_BASE:
4009 case VMX_VMCS_GUEST_ES_BASE:
4010 case VMX_VMCS_GUEST_FS_BASE:
4011 case VMX_VMCS_GUEST_GS_BASE:
4012 case VMX_VMCS_GUEST_SS_BASE:
4013 case VMX_VMCS_GUEST_LDTR_BASE:
4014 case VMX_VMCS_GUEST_TR_BASE:
4015 case VMX_VMCS_GUEST_CR3:
4016 return true;
4017 }
4018 return false;
4019}
4020
4021static bool hmR0VmxIsValidReadField(uint32_t idxField)
4022{
4023 switch (idxField)
4024 {
4025 /* Read-only fields. */
4026 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4027 return true;
4028 }
4029 /* Remaining readable fields should also be writable. */
4030 return hmR0VmxIsValidWriteField(idxField);
4031}
4032#endif /* VBOX_STRICT */
4033
4034/**
4035 * Executes the specified handler in 64-bit mode.
4036 *
4037 * @returns VBox status code.
4038 * @param pVM Pointer to the VM.
4039 * @param pVCpu Pointer to the VMCPU.
4040 * @param pCtx Pointer to the guest CPU context.
4041 * @param enmOp The operation to perform.
4042 * @param cbParam Number of parameters.
4043 * @param paParam Array of 32-bit parameters.
4044 */
4045VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4046 uint32_t *paParam)
4047{
4048 int rc, rc2;
4049 PHMGLOBLCPUINFO pCpu;
4050 RTHCPHYS HCPhysCpuPage;
4051 RTCCUINTREG uOldEFlags;
4052
4053 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4054 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4055 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4056 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4057
4058#ifdef VBOX_STRICT
4059 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4060 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4061
4062    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4063 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4064#endif
4065
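    /* Rough sequence of what follows: disable interrupts, clear the current VMCS and leave VMX root mode, have the
       32->64 switcher run the requested 64-bit handler, then re-enable CR4.VMXE, re-enter VMX root mode and
       re-activate our VMCS. */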
4066 /* Disable interrupts. */
4067 uOldEFlags = ASMIntDisableFlags();
4068
4069#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4070 RTCPUID idHostCpu = RTMpCpuId();
4071 CPUMR0SetLApic(pVM, idHostCpu);
4072#endif
4073
4074 pCpu = HMR0GetCurrentCpu();
4075 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4076
4077    /* Clear the VMCS: this marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
4078 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4079
4080 /* Leave VMX Root Mode. */
4081 VMXDisable();
4082
4083 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4084
4085 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4086 CPUMSetHyperEIP(pVCpu, enmOp);
4087 for (int i = (int)cbParam - 1; i >= 0; i--)
4088 CPUMPushHyper(pVCpu, paParam[i]);
4089
4090 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4091
4092 /* Call the switcher. */
4093 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4094 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4095
4096 /** @todo replace with hmR0VmxEnterRootMode() and LeaveRootMode(). */
4097 /* Make sure the VMX instructions don't cause #UD faults. */
4098 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4099
4100 /* Re-enter VMX Root Mode */
4101 rc2 = VMXEnable(HCPhysCpuPage);
4102 if (RT_FAILURE(rc2))
4103 {
4104 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4105 ASMSetFlags(uOldEFlags);
4106 return rc2;
4107 }
4108
4109 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4110 AssertRC(rc2);
4111 Assert(!(ASMGetFlags() & X86_EFL_IF));
4112 ASMSetFlags(uOldEFlags);
4113 return rc;
4114}
4115
4116
4117/**
4118 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
4119 * supporting 64-bit guests.
4120 *
4121 * @returns VBox status code.
4122 * @param fResume Whether to VMLAUNCH or VMRESUME.
4123 * @param pCtx Pointer to the guest-CPU context.
4124 * @param pCache Pointer to the VMCS cache.
4125 * @param pVM Pointer to the VM.
4126 * @param pVCpu Pointer to the VMCPU.
4127 */
4128DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4129{
4130 uint32_t aParam[6];
4131 PHMGLOBLCPUINFO pCpu = NULL;
4132 RTHCPHYS HCPhysCpuPage = 0;
4133 int rc = VERR_INTERNAL_ERROR_5;
4134
4135 pCpu = HMR0GetCurrentCpu();
4136 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4137
4138#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4139 pCache->uPos = 1;
4140 pCache->interPD = PGMGetInterPaeCR3(pVM);
4141 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4142#endif
4143
4144#ifdef VBOX_STRICT
4145 pCache->TestIn.HCPhysCpuPage = 0;
4146 pCache->TestIn.HCPhysVmcs = 0;
4147 pCache->TestIn.pCache = 0;
4148 pCache->TestOut.HCPhysVmcs = 0;
4149 pCache->TestOut.pCache = 0;
4150 pCache->TestOut.pCtx = 0;
4151 pCache->TestOut.eflags = 0;
4152#endif
4153
4154 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4155 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4156 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4157 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4158 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4159 aParam[5] = 0;
4160
4161#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4162 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4163 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4164#endif
4165 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4166
4167#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4168 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4169 Assert(pCtx->dr[4] == 10);
4170 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4171#endif
4172
4173#ifdef VBOX_STRICT
4174 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4175 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4176 pVCpu->hm.s.vmx.HCPhysVmcs));
4177 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4178 pCache->TestOut.HCPhysVmcs));
4179 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4180 pCache->TestOut.pCache));
4181 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4182 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4183 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4184 pCache->TestOut.pCtx));
4185 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4186#endif
4187 return rc;
4188}
4189
4190
4191/**
4192 * Initialize the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
4193 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4194 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4195 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4196 *
4197 * @returns VBox status code.
4198 * @param pVM Pointer to the VM.
4199 * @param pVCpu Pointer to the VMCPU.
4200 */
4201static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4202{
4203#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4204{ \
4205 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4206 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4207 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4208 ++cReadFields; \
4209}
4210
4211 AssertPtr(pVM);
4212 AssertPtr(pVCpu);
4213 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4214 uint32_t cReadFields = 0;
4215
4216 /* Guest-natural selector base fields */
4217#if 0
4218 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4219 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4220 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4221#endif
4222 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4223 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4224 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4225 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4226 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4227 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4228 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4229 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4230 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4231 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4232 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DR7);
4233 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4234 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4235#if 0
4236 /* Unused natural width guest-state fields. */
4237 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4238 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4239#endif
4240 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4241 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4242
4243 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4244#if 0
4245 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4246 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4247 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4248 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4249 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4250 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4251 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4252 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4253 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4254#endif
4255
4256 /* Natural width guest-state fields. */
4257 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4258#if 0
4259 /* Currently unused field. */
4260 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4261#endif
4262
4263 if (pVM->hm.s.fNestedPaging)
4264 {
4265 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4266 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4267 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4268 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4269 }
4270 else
4271 {
4272 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4273 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4274 }
4275
4276#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4277 return VINF_SUCCESS;
4278}
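
/*
 * Illustrative sketch (not part of the build): what a single invocation of the
 * VMXLOCAL_INIT_READ_CACHE_FIELD() macro above expands to, here for the
 * VMX_VMCS_GUEST_RIP field (the ##_CACHE_IDX token-pasting yields the
 * corresponding cache index):
 *
 *     {
 *         Assert(pCache->Read.aField[VMX_VMCS_GUEST_RIP_CACHE_IDX] == 0);
 *         pCache->Read.aField[VMX_VMCS_GUEST_RIP_CACHE_IDX]    = VMX_VMCS_GUEST_RIP;
 *         pCache->Read.aFieldVal[VMX_VMCS_GUEST_RIP_CACHE_IDX] = 0;
 *         ++cReadFields;
 *     }
 */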
4279
4280
4281/**
4282 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4283 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4284 * Darwin, running 64-bit guests).
4285 *
4286 * @returns VBox status code.
4287 * @param pVCpu Pointer to the VMCPU.
4288 * @param idxField The VMCS field encoding.
4289 * @param u64Val 16, 32 or 64-bit value.
4290 */
4291VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4292{
4293 int rc;
4294 switch (idxField)
4295 {
4296 /*
4297 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
4298 */
4299 /* 64-bit Control fields. */
4300 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4301 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4302 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4303 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4304 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4305 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4306 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4307 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4308 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4309 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4310 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4311 case VMX_VMCS64_CTRL_EPTP_FULL:
4312 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4313 /* 64-bit Guest-state fields. */
4314 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4315 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4316 case VMX_VMCS64_GUEST_PAT_FULL:
4317 case VMX_VMCS64_GUEST_EFER_FULL:
4318 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4319 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4320 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4321 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4322 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4323 /* 64-bit Host-state fields. */
4324 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4325 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4326 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4327 {
4328 rc = VMXWriteVmcs32(idxField, u64Val);
4329 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4330 break;
4331 }
4332
4333 /*
4334 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
4335 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs are then executed.
4336 */
4337 /* Natural-width Guest-state fields. */
4338 case VMX_VMCS_GUEST_CR3:
4339 case VMX_VMCS_GUEST_ES_BASE:
4340 case VMX_VMCS_GUEST_CS_BASE:
4341 case VMX_VMCS_GUEST_SS_BASE:
4342 case VMX_VMCS_GUEST_DS_BASE:
4343 case VMX_VMCS_GUEST_FS_BASE:
4344 case VMX_VMCS_GUEST_GS_BASE:
4345 case VMX_VMCS_GUEST_LDTR_BASE:
4346 case VMX_VMCS_GUEST_TR_BASE:
4347 case VMX_VMCS_GUEST_GDTR_BASE:
4348 case VMX_VMCS_GUEST_IDTR_BASE:
4349 case VMX_VMCS_GUEST_DR7:
4350 case VMX_VMCS_GUEST_RSP:
4351 case VMX_VMCS_GUEST_RIP:
4352 case VMX_VMCS_GUEST_SYSENTER_ESP:
4353 case VMX_VMCS_GUEST_SYSENTER_EIP:
4354 {
4355 if (!(u64Val >> 32))
4356 {
4357 /* If this field is 64-bit, VT-x will zero out the top bits. */
4358 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4359 }
4360 else
4361 {
4362 /* Assert that only the 32->64 switcher case should ever come here. */
4363 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4364 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4365 }
4366 break;
4367 }
4368
4369 default:
4370 {
4371 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4372 rc = VERR_INVALID_PARAMETER;
4373 break;
4374 }
4375 }
4376 AssertRCReturn(rc, rc);
4377 return rc;
4378}
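
/*
 * Illustrative sketch (not part of the build): how a 64-bit field with FULL and
 * HIGH parts is handled by VMXWriteVmcs64Ex() above, i.e. two 32-bit VMWRITEs
 * where the HIGH encoding is the FULL encoding + 1. The example value is made up.
 *
 *     uint64_t u64TscOffset = UINT64_C(0xfffff00000001000);
 *     rc  = VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL,     (uint32_t)u64TscOffset);
 *     rc |= VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL + 1, (uint32_t)(u64TscOffset >> 32));
 */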
4379
4380
4381/**
4382 * Queues up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
4383 * hosts (except Darwin) for 64-bit guests.
4384 *
4385 * @param pVCpu Pointer to the VMCPU.
4386 * @param idxField The VMCS field encoding.
4387 * @param u64Val 16, 32 or 64-bit value.
4388 */
4389VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4390{
4391 AssertPtr(pVCpu);
4392 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4393
4394 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4395 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4396
4397 /* Make sure there are no duplicates. */
4398 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4399 {
4400 if (pCache->Write.aField[i] == idxField)
4401 {
4402 pCache->Write.aFieldVal[i] = u64Val;
4403 return VINF_SUCCESS;
4404 }
4405 }
4406
4407 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4408 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4409 pCache->Write.cValidEntries++;
4410 return VINF_SUCCESS;
4411}
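
/*
 * Illustrative sketch (not part of the build): queueing the same natural-width
 * field twice through VMXWriteCachedVmcsEx() above only updates the existing
 * write-cache slot, so each field occupies at most one entry. Example values
 * are made up and the cache is assumed to be empty beforehand
 * (pCache = &pVCpu->hm.s.vmx.VMCSCache):
 *
 *     VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, UINT64_C(0xffffffff81000000));
 *     VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, UINT64_C(0xffffffff81000010));
 *     Assert(pCache->Write.cValidEntries == 1);   // second call coalesced into the first slot
 */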
4412
4413/* Enable later when the assembly code uses these as callbacks. */
4414#if 0
4415/**
4416 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4417 *
4418 * @param pVCpu Pointer to the VMCPU.
4419 * @param pCache Pointer to the VMCS cache.
4420 *
4421 * @remarks No-long-jump zone!!!
4422 */
4423VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4424{
4425 AssertPtr(pCache);
4426 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4427 {
4428 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4429 AssertRC(rc);
4430 }
4431 pCache->Write.cValidEntries = 0;
4432}
4433
4434
4435/**
4436 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4437 *
4438 * @param pVCpu Pointer to the VMCPU.
4439 * @param pCache Pointer to the VMCS cache.
4440 *
4441 * @remarks No-long-jump zone!!!
4442 */
4443VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4444{
4445 AssertPtr(pCache);
4446 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4447 {
4448 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4449 AssertRC(rc);
4450 }
4451}
4452#endif
4453#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4454
4455
4456/**
4457 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4458 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
4459 * timer.
4460 *
4461 * @returns VBox status code.
4462 * @param pVCpu Pointer to the VMCPU.
4463 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4464 * out-of-sync. Make sure to update the required fields
4465 * before using them.
4466 * @remarks No-long-jump zone!!!
4467 */
4468static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4469{
4470 int rc = VERR_INTERNAL_ERROR_5;
4471 bool fOffsettedTsc = false;
4472 PVM pVM = pVCpu->CTX_SUFF(pVM);
4473 if (pVM->hm.s.vmx.fUsePreemptTimer)
4474 {
4475 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4476
4477 /* Make sure the returned values have sane upper and lower boundaries. */
4478 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4479 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4480 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4481 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4482
4483 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4484 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4485 }
4486 else
4487 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4488
4489 if (fOffsettedTsc)
4490 {
4491 uint64_t u64CurTSC = ASMReadTSC();
4492 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4493 {
4494 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4495 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4496
4497 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4498 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4499 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4500 }
4501 else
4502 {
4503 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4504 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4505 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4506 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4507 }
4508 }
4509 else
4510 {
4511 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4512 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4513 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4514 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4515 }
4516}
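
/*
 * Illustrative sketch (not part of the build) of the TSC-offsetting decision
 * above: with offsetting active the guest observes (host TSC + u64TSCOffset) on
 * RDTSC/RDTSCP, so offsetting is only kept when that value cannot appear to go
 * backwards relative to the last TSC value the guest has seen.
 *
 *     uint64_t u64GuestTsc = ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset;
 *     if (u64GuestTsc >= TMCpuTickGetLastSeen(pVCpu))
 *         // use TSC_OFFSET and clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT
 *     else
 *         // intercept RDTSC(P) and emulate it instead
 */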
4517
4518
4519/**
4520 * Determines if an exception is a contributory exception. Contributory
4521 * exceptions are ones which can cause double-faults. Page-fault is
4522 * intentionally not included here as it's a conditional contributory exception.
4523 *
4524 * @returns true if the exception is contributory, false otherwise.
4525 * @param uVector The exception vector.
4526 */
4527DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4528{
4529 switch (uVector)
4530 {
4531 case X86_XCPT_GP:
4532 case X86_XCPT_SS:
4533 case X86_XCPT_NP:
4534 case X86_XCPT_TS:
4535 case X86_XCPT_DE:
4536 return true;
4537 default:
4538 break;
4539 }
4540 return false;
4541}
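
/*
 * Example of how the predicate above is used by the event reflection code
 * further down: a #GP raised while delivering a #NP (contributory during
 * contributory) must be converted into a #DF, whereas a #GP raised while
 * delivering a benign exception such as #DB is simply delivered as a #GP.
 */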
4542
4543
4544/**
4545 * Sets an event as a pending event to be injected into the guest.
4546 *
4547 * @param pVCpu Pointer to the VMCPU.
4548 * @param u32IntrInfo The VM-entry interruption-information field.
4549 * @param cbInstr The VM-entry instruction length in bytes (for software
4550 * interrupts, exceptions and privileged software
4551 * exceptions).
4552 * @param u32ErrCode The VM-entry exception error code.
4553 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4554 * page-fault.
4555 */
4556DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4557 RTGCUINTPTR GCPtrFaultAddress)
4558{
4559 Assert(!pVCpu->hm.s.Event.fPending);
4560 pVCpu->hm.s.Event.fPending = true;
4561 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4562 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4563 pVCpu->hm.s.Event.cbInstr = cbInstr;
4564 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4565}
4566
4567
4568/**
4569 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4570 *
4571 * @param pVCpu Pointer to the VMCPU.
4572 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4573 * out-of-sync. Make sure to update the required fields
4574 * before using them.
4575 */
4576DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4577{
4578 /* Inject the double-fault. */
4579 uint32_t u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
4580 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4581 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4582 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4583}
4584
4585
4586/**
4587 * Handles a condition that occurred while delivering an event through the guest
4588 * IDT.
4589 *
4590 * @returns VBox status code (informational error codes included).
4591 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4592 * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
4593 * continue execution of the guest which will deliver the #DF.
4594 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4595 *
4596 * @param pVCpu Pointer to the VMCPU.
4597 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4598 * out-of-sync. Make sure to update the required fields
4599 * before using them.
4600 * @param pVmxTransient Pointer to the VMX transient structure.
4601 *
4602 * @remarks No-long-jump zone!!!
4603 */
4604static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4605{
4606 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4607 AssertRC(rc);
4608 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4609 {
4610 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4611 AssertRCReturn(rc, rc);
4612
4613 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4614 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4615 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4616
4617 typedef enum
4618 {
4619 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4620 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4621 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4622 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4623 } VMXREFLECTXCPT;
4624
4625 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4626 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4627 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4628 {
4629 enmReflect = VMXREFLECTXCPT_XCPT;
4630#ifdef VBOX_STRICT
4631 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4632 && uExitVector == X86_XCPT_PF)
4633 {
4634 Log(("IDT: Contributory #PF uCR2=%#RGv\n", pMixedCtx->cr2));
4635 }
4636#endif
4637 if ( uExitVector == X86_XCPT_PF
4638 && uIdtVector == X86_XCPT_PF)
4639 {
4640 pVmxTransient->fVectoringPF = true;
4641 Log(("IDT: Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
4642 }
4643 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4644 && hmR0VmxIsContributoryXcpt(uExitVector)
4645 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4646 || uIdtVector == X86_XCPT_PF))
4647 {
4648 enmReflect = VMXREFLECTXCPT_DF;
4649 }
4650 else if (uIdtVector == X86_XCPT_DF)
4651 enmReflect = VMXREFLECTXCPT_TF;
4652 }
4653 else if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4654 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
4655 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
4656 {
4657 /*
4658 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
4659 * (whatever those are) as they re-occur when the instruction is restarted.
4660 */
4661 enmReflect = VMXREFLECTXCPT_XCPT;
4662 }
4663
4664 switch (enmReflect)
4665 {
4666 case VMXREFLECTXCPT_XCPT:
4667 {
4668 uint32_t u32ErrCode = 0;
4669 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
4670 {
4671 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
4672 AssertRCReturn(rc, rc);
4673 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
4674 }
4675
4676 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
4677 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
4678 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
4679 rc = VINF_SUCCESS;
4680 Log(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo,
4681 pVCpu->hm.s.Event.u32ErrCode));
4682 break;
4683 }
4684
4685 case VMXREFLECTXCPT_DF:
4686 {
4687 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
4688 rc = VINF_VMX_DOUBLE_FAULT;
4689 Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
4690 uIdtVector, uExitVector));
4691 break;
4692 }
4693
4694 case VMXREFLECTXCPT_TF:
4695 {
4696 Log(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
4697 rc = VINF_EM_RESET;
4698 break;
4699 }
4700
4701 default:
4702 Assert(rc == VINF_SUCCESS);
4703 break;
4704 }
4705 }
4706 Assert(rc == VINF_SUCCESS || rc == VINF_VMX_DOUBLE_FAULT || rc == VINF_EM_RESET);
4707 return rc;
4708}
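
/*
 * Illustrative scenarios for the reflection logic above (sketch, not part of
 * the build; assumes the contributory exceptions are being intercepted, i.e.
 * the HMVMX_CONTRIBUTORY_XCPT_MASK bits are set in the exception bitmap):
 *
 *     uIdtVector = #PF, uExitVector = #PF  -> VMXREFLECTXCPT_XCPT, fVectoringPF = true
 *     uIdtVector = #NP, uExitVector = #GP  -> VMXREFLECTXCPT_DF   (-> VINF_VMX_DOUBLE_FAULT)
 *     uIdtVector = #DF, uExitVector = any  -> VMXREFLECTXCPT_TF   (-> VINF_EM_RESET)
 */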
4709
4710
4711/**
4712 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
4713 *
4714 * @returns VBox status code.
4715 * @param pVCpu Pointer to the VMCPU.
4716 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4717 * out-of-sync. Make sure to update the required fields
4718 * before using them.
4719 *
4720 * @remarks No-long-jump zone!!!
4721 */
4722static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4723{
4724 int rc = VINF_SUCCESS;
4725 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
4726 {
4727 uint32_t uVal = 0;
4728 uint32_t uShadow = 0;
4729 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
4730 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
4731 AssertRCReturn(rc, rc);
4732 uVal = (uShadow & pVCpu->hm.s.vmx.cr0_mask) | (uVal & ~pVCpu->hm.s.vmx.cr0_mask);
4733 CPUMSetGuestCR0(pVCpu, uVal);
4734 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
4735 }
4736 return rc;
4737}
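
/*
 * Illustrative sketch (not part of the build) of the merge performed above:
 * bits set in cr0_mask are owned by the host, so the guest's view of them is
 * taken from the CR0 read shadow, while the remaining (guest-owned) bits come
 * from the real VMCS guest CR0 field:
 *
 *     guest CR0 = (read shadow & cr0_mask) | (VMCS guest CR0 & ~cr0_mask)
 */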
4738
4739
4740/**
4741 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
4742 *
4743 * @returns VBox status code.
4744 * @param pVCpu Pointer to the VMCPU.
4745 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4746 * out-of-sync. Make sure to update the required fields
4747 * before using them.
4748 *
4749 * @remarks No-long-jump zone!!!
4750 */
4751static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4752{
4753 int rc = VINF_SUCCESS;
4754 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
4755 {
4756 uint32_t uVal = 0;
4757 uint32_t uShadow = 0;
4758 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
4759 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
4760 AssertRCReturn(rc, rc);
4761 uVal = (uShadow & pVCpu->hm.s.vmx.cr4_mask) | (uVal & ~pVCpu->hm.s.vmx.cr4_mask);
4762 CPUMSetGuestCR4(pVCpu, uVal);
4763 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
4764 }
4765 return rc;
4766}
4767
4768
4769/**
4770 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
4771 *
4772 * @returns VBox status code.
4773 * @param pVCpu Pointer to the VMCPU.
4774 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4775 * out-of-sync. Make sure to update the required fields
4776 * before using them.
4777 *
4778 * @remarks No-long-jump zone!!!
4779 */
4780static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4781{
4782 int rc = VINF_SUCCESS;
4783 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
4784 {
4785 RTGCUINTREG uVal = 0;
4786 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
4787 AssertRCReturn(rc, rc);
4788 pMixedCtx->rip = uVal;
4789 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
4790 }
4791 return rc;
4792}
4793
4794
4795/**
4796 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
4797 *
4798 * @returns VBox status code.
4799 * @param pVCpu Pointer to the VMCPU.
4800 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4801 * out-of-sync. Make sure to update the required fields
4802 * before using them.
4803 *
4804 * @remarks No-long-jump zone!!!
4805 */
4806static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4807{
4808 int rc = VINF_SUCCESS;
4809 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
4810 {
4811 RTGCUINTREG uVal = 0;
4812 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
4813 AssertRCReturn(rc, rc);
4814 pMixedCtx->rsp = uVal;
4815 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
4816 }
4817 return rc;
4818}
4819
4820
4821/**
4822 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
4823 *
4824 * @returns VBox status code.
4825 * @param pVCpu Pointer to the VMCPU.
4826 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4827 * out-of-sync. Make sure to update the required fields
4828 * before using them.
4829 *
4830 * @remarks No-long-jump zone!!!
4831 */
4832static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4833{
4834 int rc = VINF_SUCCESS;
4835 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
4836 {
4837 uint32_t uVal = 0;
4838 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
4839 AssertRCReturn(rc, rc);
4840 pMixedCtx->eflags.u32 = uVal;
4841
4842 /* Undo our real-on-v86-mode changes to eflags if necessary. */
4843 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4844 {
4845 PVM pVM = pVCpu->CTX_SUFF(pVM);
4846 Assert(pVM->hm.s.vmx.pRealModeTSS);
4847 Log(("Saving real-mode RFLAGS VT-x view=%#RX64\n", pMixedCtx->rflags.u64));
4848 pMixedCtx->eflags.Bits.u1VM = 0;
4849 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
4850 }
4851
4852 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
4853 }
4854 return rc;
4855}
4856
4857
4858/**
4859 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
4860 * guest-CPU context.
4861 */
4862DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4863{
4864 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
4865 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
4866 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
4867 return rc;
4868}
4869
4870
4871/**
4872 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
4873 * from the guest-state area in the VMCS.
4874 *
4875 * @param pVCpu Pointer to the VMCPU.
4876 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4877 * out-of-sync. Make sure to update the required fields
4878 * before using them.
4879 *
4880 * @remarks No-long-jump zone!!!
4881 */
4882static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4883{
4884 uint32_t uIntrState = 0;
4885 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
4886 AssertRC(rc);
4887
4888 if (!uIntrState)
4889 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4890 else
4891 {
4892 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
4893 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
4894 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
4895 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
4896 AssertRC(rc);
4897 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
4898 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
4899 }
4900}
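
/*
 * Example: after the guest executes STI with RFLAGS.IF previously clear, VT-x
 * reports VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI for one instruction.
 * The code above records the current RIP via EMSetInhibitInterruptsPC() so that
 * the interrupt inhibition can be dropped once the guest has advanced past that
 * instruction.
 */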
4901
4902
4903/**
4904 * Saves the guest's activity state.
4905 *
4906 * @returns VBox status code.
4907 * @param pVCpu Pointer to the VMCPU.
4908 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4909 * out-of-sync. Make sure to update the required fields
4910 * before using them.
4911 *
4912 * @remarks No-long-jump zone!!!
4913 */
4914static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4915{
4916 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
4917 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
4918 return VINF_SUCCESS;
4919}
4920
4921
4922/**
4923 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
4924 * the current VMCS into the guest-CPU context.
4925 *
4926 * @returns VBox status code.
4927 * @param pVCpu Pointer to the VMCPU.
4928 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4929 * out-of-sync. Make sure to update the required fields
4930 * before using them.
4931 *
4932 * @remarks No-long-jump zone!!!
4933 */
4934static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4935{
4936 int rc = VINF_SUCCESS;
4937 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
4938 {
4939 uint32_t u32Val = 0;
4940 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
4941 pMixedCtx->SysEnter.cs = u32Val;
4942 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
4943 }
4944
4945 RTGCUINTREG uGCVal = 0;
4946 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
4947 {
4948 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &uGCVal); AssertRCReturn(rc, rc);
4949 pMixedCtx->SysEnter.eip = uGCVal;
4950 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
4951 }
4952 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
4953 {
4954 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &uGCVal); AssertRCReturn(rc, rc);
4955 pMixedCtx->SysEnter.esp = uGCVal;
4956 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
4957 }
4958 return rc;
4959}
4960
4961
4962/**
4963 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
4964 * context.
4965 *
4966 * @returns VBox status code.
4967 * @param pVCpu Pointer to the VMCPU.
4968 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4969 * out-of-sync. Make sure to update the required fields
4970 * before using them.
4971 *
4972 * @remarks No-long-jump zone!!!
4973 */
4974static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4975{
4976 int rc = VINF_SUCCESS;
4977 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
4978 {
4979 RTGCUINTREG uVal = 0;
4980 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &uVal); AssertRCReturn(rc, rc);
4981 pMixedCtx->fs.u64Base = uVal;
4982 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
4983 }
4984 return rc;
4985}
4986
4987
4988/**
4989 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
4990 * context.
4991 *
4992 * @returns VBox status code.
4993 * @param pVCpu Pointer to the VMCPU.
4994 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4995 * out-of-sync. Make sure to update the required fields
4996 * before using them.
4997 *
4998 * @remarks No-long-jump zone!!!
4999 */
5000static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5001{
5002 int rc = VINF_SUCCESS;
5003 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5004 {
5005 RTGCUINTREG uVal = 0;
5006 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &uVal); AssertRCReturn(rc, rc);
5007 pMixedCtx->gs.u64Base = uVal;
5008 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5009 }
5010 return rc;
5011}
5012
5013
5014/**
5015 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5016 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL_GS_BASE and TSC_AUX.
5017 *
5018 * @returns VBox status code.
5019 * @param pVCpu Pointer to the VMCPU.
5020 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5021 * out-of-sync. Make sure to update the required fields
5022 * before using them.
5023 *
5024 * @remarks No-long-jump zone!!!
5025 */
5026static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5027{
5028 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5029 return VINF_SUCCESS;
5030
5031#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
5032 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5033 {
5034 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5035 pMsr += i;
5036 switch (pMsr->u32IndexMSR)
5037 {
5038 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5039 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5040 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5041 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5042 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5043 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5044 default:
5045 {
5046 AssertFailed();
5047 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5048 }
5049 }
5050 }
5051#endif
5052
5053 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5054 return VINF_SUCCESS;
5055}
5056
5057
5058/**
5059 * Saves the guest control registers from the current VMCS into the guest-CPU
5060 * context.
5061 *
5062 * @returns VBox status code.
5063 * @param pVCpu Pointer to the VMCPU.
5064 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5065 * out-of-sync. Make sure to update the required fields
5066 * before using them.
5067 *
5068 * @remarks No-long-jump zone!!!
5069 */
5070static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5071{
5072 /* Guest CR0. Guest FPU. */
5073 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5074
5075 /* Guest CR4. */
5076 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5077 AssertRCReturn(rc, rc);
5078
5079 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5080 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5081 {
5082 PVM pVM = pVCpu->CTX_SUFF(pVM);
5083 if ( pVM->hm.s.fNestedPaging
5084 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
5085 {
5086 RTGCUINTREG uVal = 0;
5087 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
5088 if (pMixedCtx->cr3 != uVal)
5089 {
5090 CPUMSetGuestCR3(pVCpu, uVal);
5091 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
5092 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5093 }
5094
5095 /* We require EFER to check PAE mode. */
5096 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5097
5098 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
5099 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR. */
5100 {
5101 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
5102 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
5103 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
5104 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
5105 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5106 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5107 }
5108 AssertRCReturn(rc, rc);
5109 }
5110
5111 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5112 }
5113 return rc;
5114}
5115
5116
5117/**
5118 * Reads a guest segment register from the current VMCS into the guest-CPU
5119 * context.
5120 *
5121 * @returns VBox status code.
5122 * @param pVCpu Pointer to the VMCPU.
5123 * @param idxSel Index of the selector in the VMCS.
5124 * @param idxLimit Index of the segment limit in the VMCS.
5125 * @param idxBase Index of the segment base in the VMCS.
5126 * @param idxAccess Index of the access rights of the segment in the VMCS.
5127 * @param pSelReg Pointer to the segment selector.
5128 *
5129 * @remarks No-long-jump zone!!!
5130 * @remarks Never call this function directly. Use the VMXLOCAL_READ_SEG() macro
5131 * as that takes care of whether to read from the VMCS cache or not.
5132 */
5133DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5134 PCPUMSELREG pSelReg)
5135{
5136 uint32_t u32Val = 0;
5137 int rc = VMXReadVmcs32(idxSel, &u32Val);
5138 pSelReg->Sel = (uint16_t)u32Val;
5139 pSelReg->ValidSel = (uint16_t)u32Val;
5140 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5141
5142 rc |= VMXReadVmcs32(idxLimit, &u32Val);
5143 pSelReg->u32Limit = u32Val;
5144
5145 RTGCUINTREG uGCVal = 0;
5146 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &uGCVal);
5147 pSelReg->u64Base = uGCVal;
5148
5149 rc |= VMXReadVmcs32(idxAccess, &u32Val);
5150 pSelReg->Attr.u = u32Val;
5151 AssertRCReturn(rc, rc);
5152
5153 /*
5154 * If VT-x marks the segment as unusable, the rest of the attributes are undefined.
5155 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5156 */
5157 if (pSelReg->Attr.u & HMVMX_SEL_UNUSABLE)
5158 {
5159 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR);
5160 /** @todo r=ramshankar: This can't be right for CS, SS which have exceptions for
5161 * certain bits, they're not all undefined. Consider ORing
5162 * HMVMX_SEL_UNUSABLE instead? */
5163 pSelReg->Attr.u = HMVMX_SEL_UNUSABLE;
5164 }
5165 return rc;
5166}
5167
5168
5169/**
5170 * Saves the guest segment registers from the current VMCS into the guest-CPU
5171 * context.
5172 *
5173 * @returns VBox status code.
5174 * @param pVCpu Pointer to the VMCPU.
5175 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5176 * out-of-sync. Make sure to update the required fields
5177 * before using them.
5178 *
5179 * @remarks No-long-jump zone!!!
5180 */
5181static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5182{
5183#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5184#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5185 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5186 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5187#else
5188#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5189 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5190 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5191#endif
5192
5193 int rc = VINF_SUCCESS;
5194
5195 /* Guest segment registers. */
5196 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5197 {
5198 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5199 rc |= VMXLOCAL_READ_SEG(CS, cs);
5200 rc |= VMXLOCAL_READ_SEG(SS, ss);
5201 rc |= VMXLOCAL_READ_SEG(DS, ds);
5202 rc |= VMXLOCAL_READ_SEG(ES, es);
5203 rc |= VMXLOCAL_READ_SEG(FS, fs);
5204 rc |= VMXLOCAL_READ_SEG(GS, gs);
5205 AssertRCReturn(rc, rc);
5206
5207 /* Restore segment attributes for real-on-v86 mode hack. */
5208 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5209 {
5210 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
5211 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
5212 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
5213 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
5214 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
5215 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
5216 }
5217 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5218 }
5219
5220 return rc;
5221}
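
/*
 * Illustrative sketch (not part of the build): without VMX_USE_CACHED_VMCS_ACCESSES
 * defined, the VMXLOCAL_READ_SEG(CS, cs) invocation above expands to:
 *
 *     hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
 *                           VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
 */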
5222
5223
5224/**
5225 * Saves the guest descriptor table registers and task register from the current
5226 * VMCS into the guest-CPU context.
5227 *
5228 * @returns VBox status code.
5229 * @param pVCpu Pointer to the VMCPU.
5230 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5231 * out-of-sync. Make sure to update the required fields
5232 * before using them.
5233 *
5234 * @remarks No-long-jump zone!!!
5235 */
5236static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5237{
5238 int rc = VINF_SUCCESS;
5239
5240 /* Guest LDTR. */
5241 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5242 {
5243 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5244 AssertRCReturn(rc, rc);
5245 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5246 }
5247
5248 /* Guest GDTR. */
5249 RTGCUINTREG uGCVal = 0;
5250 uint32_t u32Val = 0;
5251 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5252 {
5253 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal);
5254 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5255 pMixedCtx->gdtr.pGdt = uGCVal;
5256 pMixedCtx->gdtr.cbGdt = u32Val;
5257 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5258 }
5259
5260 /* Guest IDTR. */
5261 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5262 {
5263 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal);
5264 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5265 pMixedCtx->idtr.pIdt = uGCVal;
5266 pMixedCtx->idtr.cbIdt = u32Val;
5267 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5268 }
5269
5270 /* Guest TR. */
5271 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5272 {
5273 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5274
5275 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
5276 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5277 rc |= VMXLOCAL_READ_SEG(TR, tr);
5278 AssertRCReturn(rc, rc);
5279 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5280 }
5281 return rc;
5282}
5283
5284
5285/**
5286 * Saves the guest debug registers from the current VMCS into the guest-CPU
5287 * context.
5288 *
5289 * @returns VBox status code.
5290 * @param pVCpu Pointer to the VMCPU.
5291 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5292 * out-of-sync. Make sure to update the required fields
5293 * before using them.
5294 *
5295 * @remarks No-long-jump zone!!!
5296 */
5297static int hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5298{
5299 int rc = VINF_SUCCESS;
5300 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5301 {
5302 RTGCUINTREG uVal;
5303 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_DR7, &uVal); AssertRCReturn(rc, rc);
5304 pMixedCtx->dr[7] = uVal;
5305
5306 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5307 }
5308 return rc;
5309}
5310
5311
5312/**
5313 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5314 *
5315 * @returns VBox status code.
5316 * @param pVCpu Pointer to the VMCPU.
5317 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5318 * out-of-sync. Make sure to update the required fields
5319 * before using them.
5320 *
5321 * @remarks No-long-jump zone!!!
5322 */
5323static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5324{
5325 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5326 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5327 return VINF_SUCCESS;
5328}
5329
5330
5331/**
5332 * Saves the entire guest state from the currently active VMCS into the
5333 * guest-CPU context. This essentially VMREADs all guest-data.
5334 *
5335 * @returns VBox status code.
5336 * @param pVCpu Pointer to the VMCPU.
5337 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5338 * out-of-sync. Make sure to update the required fields
5339 * before using them.
5340 */
5341static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5342{
5343 Assert(pVCpu);
5344 Assert(pMixedCtx);
5345
5346 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5347 return VINF_SUCCESS;
5348
5349 VMMRZCallRing3Disable(pVCpu);
5350 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5351 LogFunc(("\n"));
5352
5353 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5354 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5355
5356 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5357 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5358
5359 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5360 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5361
5362 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5363 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5364
5365 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
5366 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5367
5368 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5369 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5370
5371 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5372 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5373
5374 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5375 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5376
5377 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5378 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5379
5380 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5381 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5382
5383 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5384 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5385
5386 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5387 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5388
5389 VMMRZCallRing3Enable(pVCpu);
5390 return rc;
5391}
5392
5393
5394/**
5395 * Check per-VM and per-VCPU force flag actions that require us to go back to
5396 * ring-3 for one reason or another.
5397 *
5398 * @returns VBox status code (informational status codes included).
5399 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5400 * ring-3.
5401 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5402 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5403 * interrupts).
5404 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5405 * all EMTs to be in ring-3.
5406 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5407 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
5408 * to the EM loop.
5409 *
5410 * @param pVM Pointer to the VM.
5411 * @param pVCpu Pointer to the VMCPU.
5412 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5413 * out-of-sync. Make sure to update the required fields
5414 * before using them.
5415 */
5416static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5417{
5418 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5419
5420 int rc = VERR_INTERNAL_ERROR_5;
5421 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
5422 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
5423 | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES))
5424 {
5425 /* We need the control registers now, make sure the guest-CPU context is updated. */
5426 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5427 AssertRCReturn(rc, rc);
5428
5429 /* Pending HM CR3 sync. */
5430 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5431 {
5432 rc = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5433 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
5434 }
5435 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5436 {
5437 rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5438 AssertRC(rc);
5439 }
5440
5441 /* Pending PGM CR3 sync. */
5442 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5443 {
5444 rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5445 if (rc != VINF_SUCCESS)
5446 {
5447 AssertRC(rc);
5448 Log(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
5449 return rc;
5450 }
5451 }
5452
5453 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5454 /* -XXX- what was that about single stepping? */
5455 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5456 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5457 {
5458 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5459 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5460 Log(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5461 return rc;
5462 }
5463
5464 /* Pending VM request packets, such as hardware interrupts. */
5465 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5466 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5467 {
5468 Log(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5469 return VINF_EM_PENDING_REQUEST;
5470 }
5471
5472 /* Pending PGM pool flushes. */
5473 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5474 {
5475 Log(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5476 return VINF_PGM_POOL_FLUSH_PENDING;
5477 }
5478
5479 /* Pending DMA requests. */
5480 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5481 {
5482 Log(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5483 return VINF_EM_RAW_TO_R3;
5484 }
5485 }
5486
5487 /* Paranoia. */
5488 Assert(rc != VERR_EM_INTERPRETER);
5489 return VINF_SUCCESS;
5490}
5491
5492
5493/**
5494 * Converts any TRPM trap into a pending VMX event. This is typically used when
5495 * entering from ring-3 (not longjmp returns).
5496 *
5497 * @param pVCpu Pointer to the VMCPU.
5498 * @param pCtx Pointer to the guest-CPU context.
5499 */
5500static void hmR0VmxUpdatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
5501{
5502 if (!TRPMHasTrap(pVCpu))
5503 {
5504 Assert(!pVCpu->hm.s.Event.fPending);
5505 return;
5506 }
5507
5508 uint8_t uVector;
5509 TRPMEVENT enmTrpmEvent;
5510 RTGCUINT uErrCode;
5511 RTGCUINTPTR GCPtrFaultAddress;
5512 uint8_t cbInstr;
5513
5514 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5515 AssertRC(rc);
5516
5517 /* See Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
5518 uint32_t u32IntrInfo = uVector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5519 if (enmTrpmEvent == TRPM_TRAP)
5520 {
5521 switch (uVector)
5522 {
5523 case X86_XCPT_BP:
5524 case X86_XCPT_OF:
5525 {
5526 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5527 break;
5528 }
5529
5530 case X86_XCPT_PF:
5531 case X86_XCPT_DF:
5532 case X86_XCPT_TS:
5533 case X86_XCPT_NP:
5534 case X86_XCPT_SS:
5535 case X86_XCPT_GP:
5536 case X86_XCPT_AC:
5537 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5538 /* no break! */
5539 default:
5540 {
5541 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5542 break;
5543 }
5544 }
5545 }
5546 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5547 {
5548 if (uVector != X86_XCPT_NMI)
5549 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5550 else
5551 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5552 }
5553 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5554 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5555 else
5556 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5557
5558 rc = TRPMResetTrap(pVCpu);
5559 AssertRC(rc);
5560 Log(("Converting TRPM trap: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5561 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5562 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
5563}
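
/*
 * Illustrative sketch (not part of the build) of the u32IntrInfo layout used
 * above (see the Intel spec. 24.8.3 reference in the code): bits 7:0 hold the
 * vector, bits 10:8 the event type, bit 11 the error-code-valid flag and
 * bit 31 the valid bit. E.g. a hardware #GP with an error code:
 *
 *     uint32_t u32IntrInfo = X86_XCPT_GP
 *                          | (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT)
 *                          | VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID
 *                          | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
 */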
5564
5565
5566/**
5567 * Converts any pending VMX event into a TRPM trap. Typically used when leaving
5568 * VT-x to execute any instruction.
5569 *
5570 * @param pVCpu Pointer to the VMCPU.
5571 */
5572static void hmR0VmxUpdateTRPM(PVMCPU pVCpu)
5573{
5574 if (pVCpu->hm.s.Event.fPending)
5575 {
5576 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5577 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
5578 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
5579 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
5580
5581 /* If a trap was already pending, we did something wrong! */
5582 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
5583
5584 TRPMEVENT enmTrapType;
5585 switch (uVectorType)
5586 {
5587 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5588 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5589 enmTrapType = TRPM_HARDWARE_INT;
5590 break;
5591 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5592 enmTrapType = TRPM_SOFTWARE_INT;
5593 break;
5594 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5595 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
5596 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5597 enmTrapType = TRPM_TRAP;
5598 break;
5599 default:
5600 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
5601 enmTrapType = TRPM_32BIT_HACK;
5602 break;
5603 }
5604
5605 Log(("Converting pending HM event to TRPM trap uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5606 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5607 AssertRC(rc);
5608
5609 if (fErrorCodeValid)
5610 TRPMSetErrorCode(pVCpu, uErrorCode);
5611 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5612 && uVector == X86_XCPT_PF)
5613 {
5614 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
5615 }
5616 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5617 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5618 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5619 {
5620 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5621 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
5622 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
5623 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
5624 }
5625 pVCpu->hm.s.Event.fPending = false;
5626 }
5627}
5628
5629
5630/**
5631 * Does the necessary state syncing before doing a longjmp to ring-3.
5632 *
5633 * @param pVM Pointer to the VM.
5634 * @param pVCpu Pointer to the VMCPU.
5635 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5636 * out-of-sync. Make sure to update the required fields
5637 * before using them.
5638 * @param rcExit The reason for exiting to ring-3. Can be
5639 * VINF_VMM_UNKNOWN_RING3_CALL.
5640 *
5641 * @remarks No-long-jmp zone!!!
5642 */
5643static void hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5644{
5645 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
5646 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5647
5648 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
5649 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
5650 AssertRC(rc);
5651
5652 /* Restore FPU state if necessary and resync on next R0 reentry. */
5653 if (CPUMIsGuestFPUStateActive(pVCpu))
5654 {
5655 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
5656 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
5657 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
5658 }
5659
5660 /* Restore debug registers if necessary and resync on next R0 reentry. */
5661 if (CPUMIsGuestDebugStateActive(pVCpu))
5662 {
5663 CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
5664 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5665 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
5666 }
5667 else if (CPUMIsHyperDebugStateActive(pVCpu))
5668 {
5669 CPUMR0LoadHostDebugState(pVM, pVCpu);
5670 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5671 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
5672 }
5673
5674 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
5675 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
5676 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
5677 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
5678 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5679 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
5680}
5681
5682
5683/**
5684 * An action requires us to go back to ring-3. This function does the necessary
5685 * steps before we can safely return to ring-3. This is not the same as longjmps
5686 * to ring-3; this exit is voluntary.
5687 *
5688 * @param pVM Pointer to the VM.
5689 * @param pVCpu Pointer to the VMCPU.
5690 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5691 * out-of-sync. Make sure to update the required fields
5692 * before using them.
5693 * @param rcExit The reason for exiting to ring-3. Can be
5694 * VINF_VMM_UNKNOWN_RING3_CALL.
5695 */
5696static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5697{
5698 Assert(pVM);
5699 Assert(pVCpu);
5700 Assert(pMixedCtx);
5701 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5702
5703 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
5704 {
5705 /* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
5706 return;
5707 }
5708 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
5709 {
5710 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
5711 pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
5712 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5713 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();
5714 return;
5715 }
5716
5717 /* Please, no longjumps here (any log flush must not jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
5718 VMMRZCallRing3Disable(pVCpu);
5719 Log(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
5720
5721 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
5722 hmR0VmxUpdateTRPM(pVCpu);
5723
5724 /* Sync the guest state. */
5725 hmR0VmxLongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);
5726 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5727
5728 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
5729 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
5730 | CPUM_CHANGED_LDTR
5731 | CPUM_CHANGED_GDTR
5732 | CPUM_CHANGED_IDTR
5733 | CPUM_CHANGED_TR
5734 | CPUM_CHANGED_HIDDEN_SEL_REGS);
5735
5736 /* On our way back from ring-3 the following needs to be done. */
5737 /** @todo This can change with preemption hooks. */
5738 if (rcExit == VINF_EM_RAW_INTERRUPT)
5739 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
5740 else
5741 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
5742
5743 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
5744 VMMRZCallRing3Enable(pVCpu);
5745}
5746
5747
5748/**
5749 * VMMRZCallRing3 callback wrapper which saves the guest state before we
5750 * longjump to ring-3 and possibly get preempted.
5751 *
5752 * @param pVCpu Pointer to the VMCPU.
5753 * @param enmOperation The operation causing the ring-3 longjump.
5754 * @param pvUser The user argument (pointer to the possibly
5755 * out-of-date guest-CPU context).
5756 *
5757 * @remarks Must never be called with @a enmOperation ==
5758 * VMMCALLRING3_VM_R0_ASSERTION.
5759 */
5760DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
5761{
5762 /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
5763 Assert(pVCpu);
5764 Assert(pvUser);
5765 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5766 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5767
5768 VMMRZCallRing3Disable(pVCpu);
5769 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5770 Log(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
5771 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
5772 VMMRZCallRing3Enable(pVCpu);
5773}
5774
5775
5776/**
5777 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
5778 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
5779 *
5781 * @param pVCpu Pointer to the VMCPU.
5782 */
5783DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
5784{
5785 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
5786 {
5787 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
5788 {
5789 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
5790 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
5791 AssertRC(rc);
5792 }
5793 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
5794}
5795
5796
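/*
 * Rough reference for the VM-entry interruption-information format constructed by the
 * event-injection code below (see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection"):
 *
 *   Bits 7:0   - Vector of the interrupt or exception.
 *   Bits 10:8  - Interruption type (external interrupt, NMI, hardware exception,
 *                software interrupt, privileged software exception, software exception).
 *   Bit  11    - Deliver error code (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID).
 *   Bits 30:12 - Must be zero (asserted in hmR0VmxInjectEventVmcs()).
 *   Bit  31    - Valid (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT).
 */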
5797/**
5798 * Injects any pending events into the guest if the guest is in a state to
5799 * receive them.
5800 *
5801 * @returns VBox status code (informational status codes included).
5802 * @param pVCpu Pointer to the VMCPU.
5803 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5804 * out-of-sync. Make sure to update the required fields
5805 * before using them.
5806 */
5807static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5808{
5809 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
5810 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
5811 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5812 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
5813
5814 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
5815 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
5816 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
5817 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5818 Assert(!TRPMHasTrap(pVCpu));
5819
5820 int rc = VINF_SUCCESS;
5821 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
5822 {
5823 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5824 bool fInject = true;
5825 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
5826 {
5827 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5828 AssertRCReturn(rc, rc);
5829 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
5830 if ( fBlockInt
5831 || fBlockSti
5832 || fBlockMovSS)
5833 {
5834 fInject = false;
5835 }
5836 }
5837 else if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
5838 && ( fBlockMovSS
5839 || fBlockSti))
5840 {
5841 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
5842 fInject = false;
5843 }
5844
5845 if (fInject)
5846 {
5847 Log(("Injecting pending event\n"));
5848 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
5849 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
5850 AssertRCReturn(rc, rc);
5851 pVCpu->hm.s.Event.fPending = false;
5852 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
5853 }
5854 else
5855 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5856 } /** @todo SMI. SMIs take priority over NMIs. */
5857 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
5858 {
5859 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
5860 if ( !fBlockMovSS
5861 && !fBlockSti)
5862 {
5863 Log(("Injecting NMI\n"));
5864 RTGCUINTPTR uIntrInfo;
5865 uIntrInfo = X86_XCPT_NMI | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5866 uIntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5867 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, uIntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
5868 0 /* GCPtrFaultAddress */, &uIntrState);
5869 AssertRCReturn(rc, rc);
5870 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
5871 }
5872 else
5873 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5874 }
5875 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
5876 {
5877 /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
5878 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5879 AssertRCReturn(rc, rc);
5880 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
5881 if ( !fBlockInt
5882 && !fBlockSti
5883 && !fBlockMovSS)
5884 {
5885 uint8_t u8Interrupt;
5886 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5887 if (RT_SUCCESS(rc))
5888 {
5889 Log(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
5890 uint32_t u32IntrInfo = u8Interrupt | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5891 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5892 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
5893 0 /* GCPtrFaultAddress */, &uIntrState);
5894 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5895 }
5896 else
5897 {
5898 /** @todo Does this actually happen? If not turn it into an assertion. */
5899 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
5900 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
5901 rc = VINF_SUCCESS;
5902 }
5903 }
5904 else
5905 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5906 }
5907
5908 /*
5909 * Deliver a pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
5910 * hmR0VmxInjectEventVmcs() (e.g. real-on-v86 mode injecting software interrupts), so re-evaluate it and set the BS bit.
5911 */
5912 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5913 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
5914 int rc2 = VINF_SUCCESS;
5915 if ( fBlockSti
5916 || fBlockMovSS)
5917 {
5918 if (!DBGFIsStepping(pVCpu))
5919 {
5920 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
5921 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
5922 {
5923 /*
5924 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, VMX_EXIT_MTF,
5925 * VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI. See Intel spec. 27.3.4 "Saving Non-Register State".
5926 */
5927 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
5928 }
5929 }
5930 else
5931 {
5932 /* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
5933 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
5934 uIntrState = 0;
5935 }
5936 }
5937
5938 /*
5939 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
5940 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5941 */
5942 rc2 |= hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
5943 AssertRC(rc2);
5944
5945 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
5946 return rc;
5947}
5948
5949
5950/**
5951 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
5952 *
5953 * @param pVCpu Pointer to the VMCPU.
5954 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5955 * out-of-sync. Make sure to update the required fields
5956 * before using them.
5957 */
5958DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5959{
5960 uint32_t u32IntrInfo = X86_XCPT_UD | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5961 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5962}
5963
5964
5965/**
5966 * Injects a double-fault (#DF) exception into the VM.
5967 *
5968 * @returns VBox status code (informational status code included).
5969 * @param pVCpu Pointer to the VMCPU.
5970 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5971 * out-of-sync. Make sure to update the required fields
5972 * before using them.
 * @param puIntrState Pointer to the current guest interruptibility-state.
5973 */
5974DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
5975{
5976 uint32_t u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5977 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5978 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5979 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
5980 puIntrState);
5981}
5982
5983
5984/**
5985 * Sets a debug (#DB) exception as pending-for-injection into the VM.
5986 *
5987 * @param pVCpu Pointer to the VMCPU.
5988 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5989 * out-of-sync. Make sure to update the required fields
5990 * before using them.
5991 */
5992DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5993{
5994 uint32_t u32IntrInfo = X86_XCPT_DB | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5995 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5996 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5997}
5998
5999
6000/**
6001 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6002 *
6003 * @param pVCpu Pointer to the VMCPU.
6004 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6005 * out-of-sync. Make sure to update the required fields
6006 * before using them.
6007 * @param cbInstr The instruction length in bytes (the return RIP pushed on
6008 * the guest stack is advanced past the INTO instruction).
6009 */
6010DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6011{
6012 uint32_t u32IntrInfo = X86_XCPT_OF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
6013 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6014 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6015}
6016
6017
6018/**
6019 * Injects a general-protection (#GP) fault into the VM.
6020 *
6021 * @returns VBox status code (informational status code included).
6022 * @param pVCpu Pointer to the VMCPU.
6023 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6024 * out-of-sync. Make sure to update the required fields
6025 * before using them.
6026 * @param fErrorCodeValid Whether the error code is valid for this #GP.
 * @param u32ErrorCode The error code associated with the #GP.
 * @param puIntrState Pointer to the current guest interruptibility-state.
6027 */
6028DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6029 uint32_t *puIntrState)
6030{
6031 uint32_t u32IntrInfo = X86_XCPT_GP | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
6032 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6033 if (fErrorCodeValid)
6034 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6035 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6036 puIntrState);
6037}
6038
6039
6040/**
6041 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6042 *
6043 * @param pVCpu Pointer to the VMCPU.
6044 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6045 * out-of-sync. Make sure to update the required fields
6046 * before using them.
6047 * @param uVector The software interrupt vector number.
6048 * @param cbInstr The instruction length in bytes (the return RIP pushed on
6049 * the guest stack is advanced past the INTn instruction).
6050 */
6051DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6052{
6053 uint32_t u32IntrInfo = uVector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
6054 if ( uVector == X86_XCPT_BP
6055 || uVector == X86_XCPT_OF)
6056 {
6057 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6058 }
6059 else
6060 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6061 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6062}
6063
6064
6065/**
6066 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6067 * stack.
6068 *
6069 * @returns VBox status code (informational status code included).
6070 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6071 * @param pVM Pointer to the VM.
6072 * @param pMixedCtx Pointer to the guest-CPU context.
6073 * @param uValue The value to push to the guest stack.
6074 */
6075DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6076{
6077 /*
6078 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6079 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6080 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6081 */
6082 if (pMixedCtx->sp == 1)
6083 return VINF_EM_RESET;
6084 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6085 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6086 AssertRCReturn(rc, rc);
6087 return rc;
6088}
6089
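/*
 * Rough sketch of the real-mode structures used by the injection code below. Each IVT
 * (real-mode IDT) entry is 4 bytes: a 16-bit handler offset followed by a 16-bit code
 * segment selector. The stack frame built for the handler mirrors what an INT instruction
 * would push, so the handler's IRET behaves as usual:
 *
 *   IVT entry at IDTR.base + vector * 4:        Guest stack after the pushes below:
 *     +0  uint16_t  offIdtEntry (handler IP)      SP+4  FLAGS
 *     +2  uint16_t  selIdtEntry (handler CS)      SP+2  CS  (return)
 *                                                 SP+0  IP  (return)
 */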
6090
6091/**
6092 * Injects an event into the guest upon VM-entry by updating the relevant fields
6093 * in the VM-entry area in the VMCS.
6094 *
6095 * @returns VBox status code (informational error codes included).
6096 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6097 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6098 *
6099 * @param pVCpu Pointer to the VMCPU.
6100 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6101 * be out-of-sync. Make sure to update the required
6102 * fields before using them.
6103 * @param u64IntrInfo The VM-entry interruption-information field.
6104 * @param cbInstr The VM-entry instruction length in bytes (for
6105 * software interrupts, exceptions and privileged
6106 * software exceptions).
6107 * @param u32ErrCode The VM-entry exception error code.
6108 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6109 * @param puIntrState Pointer to the current guest interruptibility-state.
6110 * This interruptibility-state will be updated if
6111 * necessary. This cannot be NULL.
6112 *
6113 * @remarks No-long-jump zone!!!
6114 * @remarks Requires CR0!
6115 */
6116static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6117 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6118{
6119 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6120 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6121 Assert(puIntrState);
6122 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6123
6124 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6125 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6126
6127 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6128 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6129 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6130
6131 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6132
6133 /* We require CR0 to check if the guest is in real-mode. */
6134 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6135 AssertRCReturn(rc, rc);
6136
6137 /*
6138 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6139 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6140 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6141 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6142 */
6143 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6144 {
6145 PVM pVM = pVCpu->CTX_SUFF(pVM);
6146 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6147 {
6148 Assert(PDMVmmDevHeapIsEnabled(pVM));
6149 Assert(pVM->hm.s.vmx.pRealModeTSS);
6150
6151 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6152 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6153 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6154 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6155 AssertRCReturn(rc, rc);
6156 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6157
6158 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6159 const size_t cbIdtEntry = 4;
6160 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6161 {
6162 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6163 if (uVector == X86_XCPT_DF)
6164 return VINF_EM_RESET;
6165 else if (uVector == X86_XCPT_GP)
6166 {
6167 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6168 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6169 }
6170
6171 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6172 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6173 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6174 }
6175
6176 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6177 uint16_t uGuestIp = pMixedCtx->ip;
6178 if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6179 {
6180 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6181 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
6182 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6183 }
6184 else if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6185 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6186
6187 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6188 uint16_t offIdtEntry = 0;
6189 RTSEL selIdtEntry = 0;
6190 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6191 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6192 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6193 AssertRCReturn(rc, rc);
6194
6195 /* Construct the stack frame for the interrupt/exception handler. */
6196 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6197 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6198 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6199 AssertRCReturn(rc, rc);
6200
6201 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6202 if (rc == VINF_SUCCESS)
6203 {
6204 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6205 pMixedCtx->rip = offIdtEntry;
6206 pMixedCtx->cs.Sel = selIdtEntry;
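/* Real-mode segment base = selector * 16; the shift below works because cbIdtEntry happens to be 4. */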
6207 pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry;
6208 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6209 && uVector == X86_XCPT_PF)
6210 {
6211 pMixedCtx->cr2 = GCPtrFaultAddress;
6212 }
6213 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6214 | HM_CHANGED_GUEST_RIP
6215 | HM_CHANGED_GUEST_RFLAGS
6216 | HM_CHANGED_GUEST_RSP;
6217
6218 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
6219 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6220 {
6221 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6222 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6223 Log(("Clearing inhibition due to STI.\n"));
6224 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6225 }
6226 Log(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6227 }
6228 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6229 return rc;
6230 }
6231 else
6232 {
6233 /*
6234 * When injecting into a real-mode guest with unrestricted guest execution enabled, we must not set the deliver-error-code
6235 * bit. See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6236 */
6237 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6238 }
6239 }
6240
6241 /* Validate. */
6242 Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6243 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6244 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6245
6246 /* Inject. */
6247 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6248 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6249 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6250 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6251
6252 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6253 && uVector == X86_XCPT_PF)
6254 {
6255 pMixedCtx->cr2 = GCPtrFaultAddress;
6256 }
6257 Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
6258
6259 AssertRCReturn(rc, rc);
6260 return rc;
6261}
6262
6263
6264/**
6265 * Enters the VT-x session.
6266 *
6267 * @returns VBox status code.
6268 * @param pVM Pointer to the VM.
6269 * @param pVCpu Pointer to the VMCPU.
6270 * @param pCpu Pointer to the CPU info struct.
6271 */
6272VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
6273{
6274 AssertPtr(pVM);
6275 AssertPtr(pVCpu);
6276 Assert(pVM->hm.s.vmx.fSupported);
6277 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6278 NOREF(pCpu);
6279
6280 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6281
6282 /* Make sure we're in VMX root mode. */
6283 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6284 if (!(u32HostCR4 & X86_CR4_VMXE))
6285 {
6286 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6287 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6288 }
6289
6290 /* Load the active VMCS as the current one. */
6291 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6292 if (RT_FAILURE(rc))
6293 return rc;
6294
6295 /** @todo this will change with preemption hooks where we can VMRESUME as long
6296 * as we're not preempted. */
6297 pVCpu->hm.s.fResumeVM = false;
6298 return VINF_SUCCESS;
6299}
6300
6301
6302/**
6303 * Leaves the VT-x session.
6304 *
6305 * @returns VBox status code.
6306 * @param pVM Pointer to the VM.
6307 * @param pVCpu Pointer to the VMCPU.
6308 * @param pCtx Pointer to the guest-CPU context.
6309 */
6310VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6311{
6312 AssertPtr(pVCpu);
6313 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6314 NOREF(pVM);
6315 NOREF(pCtx);
6316
6317 /** @todo this will change with preemption hooks where we only VMCLEAR when
6318 * we are actually going to be preempted, not all the time like we
6319 * currently do. */
6320 /*
6321 * Sync the current VMCS (writes internal data back into the VMCS region in memory)
6322 * and mark the VMCS launch-state as "clear".
6323 */
6324 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6325 return rc;
6326}
6327
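/*
 * Note on the VMCS launch state (a short sketch; see the Intel spec. on VMCS states): VMCLEAR
 * puts a VMCS into the "clear" state, after which the next VM-entry must use VMLAUNCH; a
 * successful VMLAUNCH moves it to "launched" and later entries use VMRESUME. This is what
 * pVCpu->hm.s.fResumeVM tracks: VMXR0Enter() resets it after activating the VMCS and
 * hmR0VmxPostRunGuest() sets it so that subsequent runs use VMRESUME.
 */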
6328
6329/**
6330 * Saves the host state in the VMCS host-state area.
6331 * Sets up the VM-exit MSR-load area.
6332 *
6333 * The CPU state will be loaded from these fields on every successful VM-exit.
6334 *
6335 * @returns VBox status code.
6336 * @param pVM Pointer to the VM.
6337 * @param pVCpu Pointer to the VMCPU.
6338 *
6339 * @remarks No-long-jump zone!!!
6340 */
6341VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
6342{
6343 AssertPtr(pVM);
6344 AssertPtr(pVCpu);
6345 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6346
6347 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6348
6349 /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
6350 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
6351 return VINF_SUCCESS;
6352
6353 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
6354 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6355
6356 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
6357 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6358
6359 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
6360 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6361
6362 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
6363 return rc;
6364}
6365
6366
6367/**
6368 * Loads the guest state into the VMCS guest-state area. The CPU state will be
6369 * loaded from these fields on every successful VM-entry.
6370 *
6371 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
6372 * Sets up the VM-entry controls.
6373 * Sets up the appropriate VMX non-root function to execute guest code based on
6374 * the guest CPU mode.
6375 *
6376 * @returns VBox status code.
6377 * @param pVM Pointer to the VM.
6378 * @param pVCpu Pointer to the VMCPU.
6379 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6380 * out-of-sync. Make sure to update the required fields
6381 * before using them.
6382 *
6383 * @remarks No-long-jump zone!!!
6384 */
6385VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6386{
6387 AssertPtr(pVM);
6388 AssertPtr(pVCpu);
6389 AssertPtr(pMixedCtx);
6390 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6391
6392 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6393
6394 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
6395
6396 /* Determine real-on-v86 mode. */
6397 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
6398 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
6399 && CPUMIsGuestInRealModeEx(pMixedCtx))
6400 {
6401 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
6402 }
6403
6404 /*
6405 * Load the guest-state into the VMCS.
6406 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
6407 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
6408 */
6409 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
6410 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6411
6412 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
6413 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6414
6415 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
6416 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6417
6418 rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
6419 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6420
6421 /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
6422 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
6423 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6424
6425 rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
6426 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6427
6428 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
6429 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6430
6431 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
6432 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6433
6434 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
6435 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6436
6437 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
6438 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6439
6440 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
6441 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
6442 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
6443
6444 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
6445 return rc;
6446}
6447
6448
6449/**
6450 * Does the preparations before executing guest code in VT-x.
6451 *
6452 * This may cause longjmps to ring-3 and may even result in rescheduling to the
6453 * recompiler. We must be cautious about what we do here regarding committing
6454 * guest-state information into the VMCS, assuming we will actually execute the
6455 * guest in VT-x. If we fall back to the recompiler after updating the VMCS and
6456 * clearing the common-state (TRPM/forceflags), we must undo those changes so
6457 * that the recompiler can (and should) use them when it resumes guest
6458 * execution. Otherwise such operations must be done when we can no longer
6459 * exit to ring-3.
6460 *
6461 * @returns VBox status code (informational status codes included).
6462 * @retval VINF_SUCCESS if we can proceed with running the guest.
6463 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
6464 * into the guest.
6465 * @retval VINF_* scheduling changes, we have to go back to ring-3.
6466 *
6467 * @param pVM Pointer to the VM.
6468 * @param pVCpu Pointer to the VMCPU.
6469 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6470 * out-of-sync. Make sure to update the required fields
6471 * before using them.
6472 * @param pVmxTransient Pointer to the VMX transient structure.
6473 *
6474 * @remarks Called with preemption disabled.
6475 */
6476DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6477{
6478 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6479
6480#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
6481 PGMRZDynMapFlushAutoSet(pVCpu);
6482#endif
6483
6484 /* Check force flag actions that might require us to go back to ring-3. */
6485 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
6486 if (rc != VINF_SUCCESS)
6487 return rc;
6488
6489 /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
6490 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
6491 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
6492 {
6493 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
6494 RTGCPHYS GCPhysApicBase;
6495 GCPhysApicBase = pMixedCtx->msrApicBase;
6496 GCPhysApicBase &= PAGE_BASE_GC_MASK;
6497
6498 /* Unalias any existing mapping. */
6499 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
6500 AssertRCReturn(rc, rc);
6501
6502 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6503 Log(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
6504 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6505 AssertRCReturn(rc, rc);
6506
6507 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
6508 }
6509
6510#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6511 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
6512 pVmxTransient->uEFlags = ASMIntDisableFlags();
6513 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
6514 {
6515 ASMSetFlags(pVmxTransient->uEFlags);
6516 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
6517 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
6518 return VINF_EM_RAW_INTERRUPT;
6519 }
6520 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6521 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6522#endif
6523
6524 /*
6525 * Evaluates and injects any pending events, toggling force-flags and updating the guest-interruptibility
6526 * state (interrupt shadow) in the VMCS. This -can- potentially be reworked to be done before disabling
6527 * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
6528 */
6529 /** @todo Rework event evaluation and injection to be completely separate. */
6530 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
6531 AssertRCReturn(rc, rc);
6532 return rc;
6533}
6534
6535
6536/**
6537 * Prepares to run guest code in VT-x once we have committed to doing so. This
6538 * means there is no backing out to ring-3 or anywhere else at this
6539 * point.
6540 *
6541 * @param pVM Pointer to the VM.
6542 * @param pVCpu Pointer to the VMCPU.
6543 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6544 * out-of-sync. Make sure to update the required fields
6545 * before using them.
6546 * @param pVmxTransient Pointer to the VMX transient structure.
6547 *
6548 * @remarks Called with preemption disabled.
6549 * @remarks No-long-jump zone!!!
6550 */
6551DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6552{
6553 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6554 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6555
6556#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6557 /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
6558 pVmxTransient->uEFlags = ASMIntDisableFlags();
6559 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6560#endif
6561
6562 /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
6563 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
6564 Log4(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
6565#ifdef HMVMX_SYNC_FULL_GUEST_STATE
6566 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
6567#endif
6568 int rc = VINF_SUCCESS;
6569 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
6570 {
6571 rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
6572 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
6573 }
6574 else if (pVCpu->hm.s.fContextUseFlags)
6575 {
6576 rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
6577 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
6578 }
6579 AssertRC(rc);
6580 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
6581
6582 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
6583 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
6584 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
6585
6586 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
6587 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
6588 {
6589 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
6590 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
6591 }
6592
6593 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
6594 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
6595 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
6596
6597 /*
6598 * TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not support the
6599 * virtualize-APIC-accesses feature (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).
6600 */
6601 if (pVM->hm.s.fTPRPatchingActive)
6602 {
6603 Assert(!CPUMIsGuestInLongMode(pVCpu));
6604
6605 /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
6606 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6607 AssertRC(rc);
6608
6609 /* The patch code uses the LSTAR MSR as it's implicitly unused by a 32-bit guest (i.e. SYSCALL is 64-bit only). */
6610 pVmxTransient->u64LStarMsr = ASMRdMsr(MSR_K8_LSTAR);
6611 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); /* pMixedCtx->msrLSTAR contains the guest's TPR,
6612 see hmR0VmxLoadGuestApicState(). */
6613 }
6614
6615#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6616 /*
6617 * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
6618 * RDTSCP (when it doesn't cause a VM-exit) reads the guest MSR. See @bugref{3324}.
6619 */
6620 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6621 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
6622 {
6623 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
6624 uint64_t u64GuestTscAux = 0;
6625 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
6626 AssertRC(rc2);
6627 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
6628 }
6629#endif
6630
6631 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
6632 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
6633 to start executing. */
6634}
6635
6636
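/*
 * Layout sketch of the VM-exit reason field read below (see Intel spec. 24.9.1 "Basic VM-Exit
 * Information"): bits 15:0 hold the basic exit reason and bit 31 is set for exits caused by a
 * VM-entry failure, which is why only the low 16 bits are kept in pVmxTransient->uExitReason.
 */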
6637/**
6638 * Performs some essential restoration of state after running guest code in
6639 * VT-x.
6640 *
6641 * @param pVM Pointer to the VM.
6642 * @param pVCpu Pointer to the VMCPU.
6643 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6644 * out-of-sync. Make sure to update the required fields
6645 * before using them.
6646 * @param pVmxTransient Pointer to the VMX transient structure.
6647 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
6648 *
6649 * @remarks Called with interrupts disabled.
6650 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
6651 * unconditionally when it is safe to do so.
6652 */
6653DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
6654{
6655 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6656 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
6657
6658 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
6659 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
6660 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
6661 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
6662 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
6663
6664 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
6665 {
6666#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6667 /* Restore host's TSC_AUX. */
6668 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6669 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
6670#endif
6671 /** @todo Find a way to fix hardcoding a guestimate. */
6672 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
6673 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
6674 }
6675
6676 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
6677 Assert(!(ASMGetFlags() & X86_EFL_IF));
6678 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6679
6680 /* Restore the effects of TPR patching if any. */
6681 if (pVM->hm.s.fTPRPatchingActive)
6682 {
6683 int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6684 AssertRC(rc);
6685 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); /* MSR_K8_LSTAR contains the guest TPR. */
6686 ASMWrMsr(MSR_K8_LSTAR, pVmxTransient->u64LStarMsr);
6687 }
6688
6689 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
6690 pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
6691
6692 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
6693 uint32_t uExitReason;
6694 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
6695 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
6696 AssertRC(rc);
6697 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
6698 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
6699
6700 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
6701 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
6702
6703 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
6704 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
6705 {
6706 Log(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
6707 return;
6708 }
6709
6710 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
6711 {
6712 /* Update the guest interruptibility-state from the VMCS. */
6713 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
6714#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
6715 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6716 AssertRC(rc);
6717#endif
6718 /*
6719 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
6720 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
6721 * also why we do it outside of hmR0VmxSaveGuestState() which must never cause longjmps.
6722 */
6723 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
6724 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
6725 {
6726 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
6727 AssertRC(rc);
6728 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
6729 }
6730 }
6731}
6732
6733
6734/**
6735 * Runs the guest code using VT-x.
6736 *
6737 * @returns VBox status code.
6738 * @param pVM Pointer to the VM.
6739 * @param pVCpu Pointer to the VMCPU.
6740 * @param pCtx Pointer to the guest-CPU context.
6741 *
6742 * @remarks Called with preemption disabled.
6743 */
6744VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6745{
6746 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6747 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6748
6749 VMXTRANSIENT VmxTransient;
6750 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
6751 int rc = VERR_INTERNAL_ERROR_5;
6752 uint32_t cLoops = 0;
6753 hmR0VmxUpdatePendingEvent(pVCpu, pCtx);
6754
6755 for (;; cLoops++)
6756 {
6757 Assert(!HMR0SuspendPending());
6758 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
6759 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
6760 (unsigned)RTMpCpuId(), cLoops));
6761
6762 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
6763 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
6764 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
6765 if (rc != VINF_SUCCESS)
6766 break;
6767
6768 /*
6769 * No longjmps to ring-3 from this point on!!!
6770 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
6771 * This also disables flushing of the R0-logger instance (if any).
6772 */
6773 VMMRZCallRing3Disable(pVCpu);
6774 VMMRZCallRing3RemoveNotification(pVCpu);
6775 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
6776
6777 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
6778 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
6779
6780 /*
6781 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
6782 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
6783 */
6784 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
6785 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
6786 {
6787 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
6788 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
6789 return rc;
6790 }
6791
6792 /* Handle the VM-exit. */
6793 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
6794 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
6795 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
6796 HMVMX_START_EXIT_DISPATCH_PROF();
6797#ifdef HMVMX_USE_FUNCTION_TABLE
6798 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
6799#else
6800 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
6801#endif
6802 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
6803 if (rc != VINF_SUCCESS)
6804 break;
6805 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
6806 {
6807 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
6808 rc = VINF_EM_RAW_INTERRUPT;
6809 break;
6810 }
6811 }
6812
6813 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
6814 if (rc == VERR_EM_INTERPRETER)
6815 rc = VINF_EM_RAW_EMULATE_INSTR;
6816 else if (rc == VINF_EM_RESET)
6817 rc = VINF_EM_TRIPLE_FAULT;
6818 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
6819 return rc;
6820}
6821
6822
6823#ifndef HMVMX_USE_FUNCTION_TABLE
6824DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
6825{
6826 int rc;
6827 switch (rcReason)
6828 {
6829 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
6830 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
6831 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
6832 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
6833 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
6834 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
6835 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6836 case VMX_EXIT_XCPT_NMI: rc = hmR0VmxExitXcptNmi(pVCpu, pMixedCtx, pVmxTransient); break;
6837 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
6838 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
6839 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
6840 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
6841 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
6842 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
6843 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
6844 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
6845 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
6846 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
6847 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
6848 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
6849 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
6850 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
6851 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
6852 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
6853 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
6854 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6855 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6856 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
6857 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
6858 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
6859 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
6860 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
6861 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
6862
6863 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
6864 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
6865 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
6866 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
6867 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
6868 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
6869 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
6870 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
6871 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
6872
6873 case VMX_EXIT_VMCALL:
6874 case VMX_EXIT_VMCLEAR:
6875 case VMX_EXIT_VMLAUNCH:
6876 case VMX_EXIT_VMPTRLD:
6877 case VMX_EXIT_VMPTRST:
6878 case VMX_EXIT_VMREAD:
6879 case VMX_EXIT_VMRESUME:
6880 case VMX_EXIT_VMWRITE:
6881 case VMX_EXIT_VMXOFF:
6882 case VMX_EXIT_VMXON:
6883 case VMX_EXIT_INVEPT:
6884 case VMX_EXIT_INVVPID:
6885 case VMX_EXIT_VMFUNC:
6886 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
6887 break;
6888 default:
6889 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
6890 break;
6891 }
6892 return rc;
6893}
6894#endif
6895
6896#ifdef DEBUG
6897/* Is there some generic IPRT define for this that isn't in Runtime/internal/\* ?? */
6898# define VMX_ASSERT_PREEMPT_CPUID_VAR() \
6899 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6900# define VMX_ASSERT_PREEMPT_CPUID() \
6901 do \
6902 { \
6903 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6904 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6905 } while (0)
6906
6907# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
6908 do { \
6909 AssertPtr(pVCpu); \
6910 AssertPtr(pMixedCtx); \
6911 AssertPtr(pVmxTransient); \
6912 Assert(pVmxTransient->fVMEntryFailed == false); \
6913 Assert(ASMIntAreEnabled()); \
6914 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
6915 VMX_ASSERT_PREEMPT_CPUID_VAR(); \
6916 LogFunc(("vcpu[%u] vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n", \
6917 (unsigned)pVCpu->idCpu)); \
6918 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
6919 if (VMMR0IsLogFlushDisabled(pVCpu)) \
6920 VMX_ASSERT_PREEMPT_CPUID(); \
6921 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6922 } while (0)
6923# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
6924 do { \
6925 LogFunc(("\n")); \
6926 } while(0)
6927#else /* Release builds */
6928# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
6929# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
6930#endif
6931
6932
6933/**
6934 * Advances the guest RIP after reading it from the VMCS.
6935 *
6936 * @returns VBox status code.
6937 * @param pVCpu Pointer to the VMCPU.
6938 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6939 * out-of-sync. Make sure to update the required fields
6940 * before using them.
6941 * @param pVmxTransient Pointer to the VMX transient structure.
6942 *
6943 * @remarks No-long-jump zone!!!
6944 */
6945DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6946{
6947 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
6948 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6949 AssertRCReturn(rc, rc);
6950
6951 pMixedCtx->rip += pVmxTransient->cbInstr;
6952 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
6953 return rc;
6954}
6955
6956
6957/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6958/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6959/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6960/**
6961 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6962 */
6963HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6964{
6965 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6966 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
6967#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6968 Assert(ASMIntAreEnabled());
6969 return VINF_SUCCESS;
6970#else
6971 return VINF_EM_RAW_INTERRUPT;
6972#endif
6973}
6974
6975
6976/**
6977 * VM-exit handler for exceptions and NMIs (VMX_EXIT_XCPT_NMI).
6978 */
6979HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6980{
6981 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6982 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
6983
6984 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
6985 AssertRCReturn(rc, rc);
6986
6987 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
6988 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT)
6989 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6990
6991 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
6992 {
6993 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
6994 return VINF_EM_RAW_INTERRUPT;
6995 }
6996
6997 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
6998 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
6999 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
7000 {
7001 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7002 return VINF_SUCCESS;
7003 }
7004 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
7005 {
7006 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7007 return rc;
7008 }
7009
7010 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
7011 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
7012 switch (uIntrType)
7013 {
7014 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
7015 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7016 /* no break */
7017 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
7018 {
7019 switch (uVector)
7020 {
7021 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
7022 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
7023 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
7024 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
7025 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
7026 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
7027#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7028 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
7029 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7030 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
7031 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7032 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7033 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7034 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
7035 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7036 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
7037 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7038#endif
7039 default:
7040 {
7041 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7042 AssertRCReturn(rc, rc);
7043
7044 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
7045 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7046 {
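                    /*
                     * Real-mode-on-v86 (i.e. no unrestricted guest execution): exceptions we don't handle
                     * explicitly are reflected back to the guest by re-queueing the event from the
                     * exit-interruption info so it gets injected on the next VM-entry.
                     */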
7047 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
7048 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
7049 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7050 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
7051 AssertRCReturn(rc, rc);
7052 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
7053 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
7054 0 /* GCPtrFaultAddress */);
7055 AssertRCReturn(rc, rc);
7056 }
7057 else
7058 {
7059 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
7060 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
7061 }
7062 break;
7063 }
7064 }
7065 break;
7066 }
7067
7068 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
7069 default:
7070 {
7071 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
7072 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
7073 break;
7074 }
7075 }
7076 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7077 return rc;
7078}
7079
7080
7081/**
7082 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7083 */
7084HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7085{
7086 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7087
7088    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7089 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT);
7090 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
7091 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
7092 AssertRCReturn(rc, rc);
7093
7094 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
7095 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
7096 return VINF_SUCCESS;
7097}
7098
7099
7100/**
7101 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7102 */
7103HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7104{
7105 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7106 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7107 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7108}
7109
7110
7111/**
7112 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7113 */
7114HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7115{
7116 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7117 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
7118 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7119}
7120
7121
7122/**
7123 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7124 */
7125HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7126{
7127 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7128 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
7129 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7130}
7131
7132
7133/**
7134 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7135 */
7136HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7137{
7138 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7139 PVM pVM = pVCpu->CTX_SUFF(pVM);
7140 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7141 if (RT_LIKELY(rc == VINF_SUCCESS))
7142 {
7143 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7144 Assert(pVmxTransient->cbInstr == 2);
7145 }
7146 else
7147 {
7148 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
7149 rc = VERR_EM_INTERPRETER;
7150 }
7151 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
7152 return rc;
7153}
7154
7155
7156/**
7157 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7158 */
7159HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7160{
7161 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7162 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
7163 AssertRCReturn(rc, rc);
7164
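    /* GETSEC raises #UD when CR4.SMXE is clear, so this VM-exit is only sensible with SMXE set; emulate the instruction in ring-3. */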
7165 if (pMixedCtx->cr4 & X86_CR4_SMXE)
7166 return VINF_EM_RAW_EMULATE_INSTR;
7167
7168 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
7169 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7170}
7171
7172
7173/**
7174 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7175 */
7176HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7177{
7178 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7179 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7180 AssertRCReturn(rc, rc);
7181
7182 PVM pVM = pVCpu->CTX_SUFF(pVM);
7183 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7184 if (RT_LIKELY(rc == VINF_SUCCESS))
7185 {
7186 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7187 Assert(pVmxTransient->cbInstr == 2);
7188 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7189 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
7190 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7191 }
7192 else
7193 {
7194 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
7195 rc = VERR_EM_INTERPRETER;
7196 }
7197 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7198 return rc;
7199}
7200
7201
7202/**
7203 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7204 */
7205HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7206{
7207 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7208 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7209 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
7210 AssertRCReturn(rc, rc);
7211
7212 PVM pVM = pVCpu->CTX_SUFF(pVM);
7213 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
7214 if (RT_LIKELY(rc == VINF_SUCCESS))
7215 {
7216 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7217 Assert(pVmxTransient->cbInstr == 3);
7218 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7219 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
7220 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7221 }
7222 else
7223 {
7224 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
7225 rc = VERR_EM_INTERPRETER;
7226 }
7227 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7228 return rc;
7229}
7230
7231
7232/**
7233 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7234 */
7235HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7236{
7237 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7238 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7239 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
7240 AssertRCReturn(rc, rc);
7241
7242 PVM pVM = pVCpu->CTX_SUFF(pVM);
7243 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7244 if (RT_LIKELY(rc == VINF_SUCCESS))
7245 {
7246 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7247 Assert(pVmxTransient->cbInstr == 2);
7248 }
7249 else
7250 {
7251 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7252 rc = VERR_EM_INTERPRETER;
7253 }
7254 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
7255 return rc;
7256}
7257
7258
7259/**
7260 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7261 */
7262HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7263{
7264 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7265 PVM pVM = pVCpu->CTX_SUFF(pVM);
7266 Assert(!pVM->hm.s.fNestedPaging);
7267
7268 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7269 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7270 AssertRCReturn(rc, rc);
7271
7272 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
7273 rc = VBOXSTRICTRC_VAL(rc2);
7274 if (RT_LIKELY(rc == VINF_SUCCESS))
7275 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7276 else
7277 {
7278 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RGv failed with %Rrc\n",
7279 pVmxTransient->uExitQualification, rc));
7280 }
7281 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
7282 return rc;
7283}
7284
7285
7286/**
7287 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7288 */
7289HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7290{
7291 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7292 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7293 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7294 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7295 AssertRCReturn(rc, rc);
7296
7297 PVM pVM = pVCpu->CTX_SUFF(pVM);
7298 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7299 if (RT_LIKELY(rc == VINF_SUCCESS))
7300 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7301 else
7302 {
7303 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
7304 rc = VERR_EM_INTERPRETER;
7305 }
7306 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
7307 return rc;
7308}
7309
7310
7311/**
7312 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7313 */
7314HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7315{
7316 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7317 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7318 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7319 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7320 AssertRCReturn(rc, rc);
7321
7322 PVM pVM = pVCpu->CTX_SUFF(pVM);
7323 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7324 rc = VBOXSTRICTRC_VAL(rc2);
7325 if (RT_LIKELY( rc == VINF_SUCCESS
7326 || rc == VINF_EM_HALT))
7327 {
7328 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7329 AssertRCReturn(rc3, rc3);
7330
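        /*
         * If MWAIT halted the vCPU but EM decides execution should continue (e.g. an interrupt is presumably
         * already pending and deliverable), keep running the guest instead of returning VINF_EM_HALT to ring-3.
         */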
7331 if ( rc == VINF_EM_HALT
7332 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
7333 {
7334 rc = VINF_SUCCESS;
7335 }
7336 }
7337 else
7338 {
7339 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
7340 rc = VERR_EM_INTERPRETER;
7341 }
7342 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
7343 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
7344 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
7345 return rc;
7346}
7347
7348
7349/**
7350 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
7351 */
7352HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7353{
7354 /*
7355 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
7356 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
7357     * executing VMCALL in VMX root operation. If we get here, something funny is going on.
7358 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
7359 */
7360 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7361 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7362}
7363
7364
7365/**
7366 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
7367 */
7368HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7369{
7370 /*
7371 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
7372     * root operation. If we get here, there is something funny going on.
7373 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
7374 */
7375 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7376 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7377}
7378
7379
7380/**
7381 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
7382 */
7383HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7384{
7385 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
7386 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7387 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7388}
7389
7390
7391/**
7392 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
7393 */
7394HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7395{
7396 /*
7397 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
7398 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
7399 * See Intel spec. 25.3 "Other Causes of VM-exits".
7400 */
7401 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7402 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7403}
7404
7405
7406/**
7407 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
7408 * VM-exit.
7409 */
7410HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7411{
7412 /*
7413 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. "33.14.1 Default Treatment of
7414 * SMI Delivery" and "29.3 VMX Instructions" for "VMXON". It is -NOT- blocked in VMX non-root operation so we can potentially
7415 * still get these exits. See Intel spec. "23.8 Restrictions on VMX operation".
7416 */
7417 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7418 return VINF_SUCCESS; /** @todo r=ramshankar: correct?. */
7419}
7420
7421
7422/**
7423 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7424 * VM-exit.
7425 */
7426HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7427{
7428 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7429 return VINF_EM_RESET;
7430}
7431
7432
7433/**
7434 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7435 */
7436HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7437{
7438 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7439 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
7440 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7441 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7442 AssertRCReturn(rc, rc);
7443
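    /* HLT is a single-byte opcode (0xF4), so simply advance RIP by one instead of reading the instruction length from the VMCS. */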
7444 pMixedCtx->rip++;
7445 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7446 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
7447 rc = VINF_SUCCESS;
7448 else
7449 rc = VINF_EM_HALT;
7450
7451 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
7452 return rc;
7453}
7454
7455
7456/**
7457 * VM-exit handler for instructions that result in a #UD exception delivered to the guest.
7458 */
7459HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7460{
7461 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7462 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
7463 return VINF_SUCCESS;
7464}
7465
7466
7467/**
7468 * VM-exit handler for expiry of the VMX preemption timer.
7469 */
7470HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7471{
7472 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7473
7474 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
7475 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7476
7477 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7478 PVM pVM = pVCpu->CTX_SUFF(pVM);
7479 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7480 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
7481 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7482}
7483
7484
7485/**
7486 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7487 */
7488HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7489{
7490 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7491    /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
7492 /** @todo check if XSETBV is supported by the recompiler. */
7493 return VERR_EM_INTERPRETER;
7494}
7495
7496
7497/**
7498 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7499 */
7500HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7501{
7502 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7503    /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
7504 /** @todo implement EMInterpretInvpcid() */
7505 return VERR_EM_INTERPRETER;
7506}
7507
7508
7509/**
7510 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
7511 * Error VM-exit.
7512 */
7513HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7514{
7515 uint32_t uIntrState;
7516 HMVMXHCUINTREG uHCReg;
7517 uint64_t u64Val;
7518 uint32_t u32Val;
7519
7520 int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7521 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
7522 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7523 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
7524 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7525 AssertRCReturn(rc, rc);
7526
7527 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
7528 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7529 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7530 Log(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
7531
7532 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
7533 Log(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
7534 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
7535 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
7536 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
7537    Log(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
7538 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
7539 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
7540 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
7541 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7542 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7543 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7544
7545 PVM pVM = pVCpu->CTX_SUFF(pVM);
7546 HMDumpRegs(pVM, pVCpu, pMixedCtx);
7547
7548 return VERR_VMX_INVALID_GUEST_STATE;
7549}
7550
7551
7552/**
7553 * VM-exit handler for VM-entry failure due to an MSR-load
7554 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
7555 */
7556HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7557{
7558 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7559 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7560}
7561
7562
7563/**
7564 * VM-exit handler for VM-entry failure due to a machine-check event
7565 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
7566 */
7567HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7568{
7569 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7570 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7571}
7572
7573
7574/**
7575 * VM-exit handler for all undefined reasons. Should never ever happen... in
7576 * theory.
7577 */
7578HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7579{
7580 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
7581 return VERR_VMX_UNDEFINED_EXIT_CODE;
7582}
7583
7584
7585/**
7586 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
7587 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
7588 * Conditional VM-exit.
7589 */
7590HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7591{
7592 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7593 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
7594 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
7595 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
7596 return VERR_EM_INTERPRETER;
7597 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7598 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7599}
7600
7601
7602/**
7603 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
7604 */
7605HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7606{
7607 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7608 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
7609 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
7610 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
7611 return VERR_EM_INTERPRETER;
7612 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7613 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7614}
7615
7616
7617/**
7618 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7619 */
7620HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7621{
7622 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7623 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
7624 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7625 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7626 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7627 AssertRCReturn(rc, rc);
7628
7629 PVM pVM = pVCpu->CTX_SUFF(pVM);
7630 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7631 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
7632 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
7633 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7634
7635 if (RT_LIKELY(rc == VINF_SUCCESS))
7636 {
7637 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7638 Assert(pVmxTransient->cbInstr == 2);
7639 }
7640 return rc;
7641}
7642
7643
7644/**
7645 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7646 */
7647HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7648{
7649 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7650 PVM pVM = pVCpu->CTX_SUFF(pVM);
7651 int rc = VINF_SUCCESS;
7652
7653    /* If TPR patching is active, LSTAR holds the guest TPR; writes to it must be propagated to the APIC. */
7654 if ( pVM->hm.s.fTPRPatchingActive
7655 && pMixedCtx->ecx == MSR_K8_LSTAR)
7656 {
7657 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* Requires EFER but it's always up-to-date. */
7658 if ((pMixedCtx->eax & 0xff) != pVmxTransient->u8GuestTpr)
7659 {
7660 rc = PDMApicSetTPR(pVCpu, pMixedCtx->eax & 0xff);
7661 AssertRC(rc);
7662 }
7663
7664 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7665 Assert(pVmxTransient->cbInstr == 2);
7666 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7667 return VINF_SUCCESS;
7668 }
7669
7670 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
7671 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7672 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7673 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7674 AssertRCReturn(rc, rc);
7675 Log(("ecx=%#RX32\n", pMixedCtx->ecx));
7676
7677 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7678 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
7679 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7680
7681 if (RT_LIKELY(rc == VINF_SUCCESS))
7682 {
7683 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7684
7685 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7686 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
7687 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
7688 {
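            /*
             * The x2APIC MSR write was already handled by EMInterpretWrmsr(); mark the APIC state as changed
             * so the TPR/virtual-APIC state is refreshed before the next VM-entry.
             */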
7689 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_APIC_STATE);
7690 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7691 }
7692 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
7693 {
7694 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
7695 AssertRCReturn(rc, rc);
7696 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
7697 }
7698 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7699 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7700
7701 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
7702 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)))
7703 {
7704 switch (pMixedCtx->ecx)
7705 {
7706 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
7707 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
7708 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
7709 case MSR_K8_FS_BASE: /* no break */
7710 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
7711 /* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
7712 }
7713 }
7714#ifdef VBOX_STRICT
7715 else
7716 {
7717 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7718 switch (pMixedCtx->ecx)
7719 {
7720 case MSR_IA32_SYSENTER_CS:
7721 case MSR_IA32_SYSENTER_EIP:
7722 case MSR_IA32_SYSENTER_ESP:
7723 case MSR_K8_FS_BASE:
7724 case MSR_K8_GS_BASE:
7725 {
7726 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
7727 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7728 }
7729
7730 case MSR_K8_LSTAR:
7731 case MSR_K6_STAR:
7732 case MSR_K8_SF_MASK:
7733 case MSR_K8_TSC_AUX:
7734 case MSR_K8_KERNEL_GS_BASE:
7735 {
7736 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7737 pMixedCtx->ecx));
7738 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7739 }
7740 }
7741 }
7742#endif /* VBOX_STRICT */
7743 }
7744 return rc;
7745}
7746
7747
7748/**
7749 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7750 */
7751HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7752{
7753 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7754 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT. */
7755 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
7756 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
7757 return VERR_EM_INTERPRETER;
7758 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7759 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7760}
7761
7762
7763/**
7764 * VM-exit handler for when the TPR value is lowered below the specified
7765 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7766 */
7767HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7768{
7769 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7770 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW);
7771
7772 /*
7773     * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
7774 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
7775 * resume guest execution.
7776 */
7777 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7778 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
7779 return VINF_SUCCESS;
7780}
7781
7782
7783/**
7784 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7785 * VM-exit.
7786 *
7787 * @retval VINF_SUCCESS when guest execution can continue.
7788 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
7789 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7790 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
7791 * recompiler.
7792 */
7793HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7794{
7795 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7796 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
7797 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7798 AssertRCReturn(rc, rc);
7799
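    /*
     * The exit qualification for MOV CRx exits encodes the access type (MOV to/from CRx, CLTS or LMSW),
     * the control register number and the general-purpose register involved; decode and dispatch below.
     */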
7800 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
7801 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
7802 PVM pVM = pVCpu->CTX_SUFF(pVM);
7803 switch (uAccessType)
7804 {
7805 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
7806 {
7807#if 0
7808 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
7809 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7810#else
7811 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7812 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7813 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7814#endif
7815 AssertRCReturn(rc, rc);
7816
7817 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7818 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
7819 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
7820 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
7821
7822 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
7823 {
7824 case 0: /* CR0 */
7825 Log(("CRX CR0 write rc=%d CR0=%#RGv\n", rc, pMixedCtx->cr0));
7826 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7827 break;
7828                case 2: /* CR2 */
7829                    /* Nothing to do here; CR2 is not part of the VMCS. */
7830 break;
7831 case 3: /* CR3 */
7832 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
7833 Log(("CRX CR3 write rc=%d CR3=%#RGv\n", rc, pMixedCtx->cr3));
7834 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
7835 break;
7836 case 4: /* CR4 */
7837 Log(("CRX CR4 write rc=%d CR4=%#RGv\n", rc, pMixedCtx->cr4));
7838 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
7839 break;
7840 case 8: /* CR8 */
7841 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
7842 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
7843 /* We don't need to update HM_CHANGED_VMX_GUEST_APIC_STATE here as this -cannot- happen with TPR shadowing. */
7844 break;
7845 default:
7846 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
7847 break;
7848 }
7849
7850 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7851 break;
7852 }
7853
7854 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
7855 {
7856 /* EMInterpretCRxRead() requires EFER MSR, CS. */
7857 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7858 AssertRCReturn(rc, rc);
7859 Assert( !pVM->hm.s.fNestedPaging
7860 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
7861 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
7862
7863 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
7864 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
7865 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
7866
7867 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7868 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
7869 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
7870 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
7871 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7872 Log(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
7873 break;
7874 }
7875
7876 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
7877 {
7878 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7879 AssertRCReturn(rc, rc);
7880 rc = EMInterpretCLTS(pVM, pVCpu);
7881 AssertRCReturn(rc, rc);
7882 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7883 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
7884 Log(("CRX CLTS write rc=%d\n", rc));
7885 break;
7886 }
7887
7888 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
7889 {
7890 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7891 AssertRCReturn(rc, rc);
7892 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
7893 if (RT_LIKELY(rc == VINF_SUCCESS))
7894 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7895 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
7896 Log(("CRX LMSW write rc=%d\n", rc));
7897 break;
7898 }
7899
7900 default:
7901 {
7902 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
7903 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
7904 }
7905 }
7906
7907 /* Validate possible error codes. */
7908 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
7909 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
7910 if (RT_SUCCESS(rc))
7911 {
7912 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7913 AssertRCReturn(rc2, rc2);
7914 }
7915
7916 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
7917 return rc;
7918}
7919
7920
7921/**
7922 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
7923 * VM-exit.
7924 */
7925HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7926{
7927 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7928 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
7929
7930 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7931 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7932 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7933 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
7934 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
7935 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
7936 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
7937 AssertRCReturn(rc, rc);
7938 Log(("CS:RIP=%04x:%#RGv\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
7939
7940 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
7941 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
7942 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
7943 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
7944 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
7945 bool fIOString = (VMX_EXIT_QUALIFICATION_IO_STRING(pVmxTransient->uExitQualification) == 1);
7946 Assert(uIOWidth == 0 || uIOWidth == 1 || uIOWidth == 3);
7947
7948 /* I/O operation lookup arrays. */
7949 static const uint32_t s_aIOSize[4] = { 1, 2, 0, 4 }; /* Size of the I/O Accesses. */
7950 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
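    /* The exit qualification encodes the access width as 0, 1 or 3 for 1, 2 or 4 byte accesses respectively;
       encoding 2 is not used, hence the zero entries above. */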
7951
7952 const uint32_t cbSize = s_aIOSize[uIOWidth];
7953 const uint32_t cbInstr = pVmxTransient->cbInstr;
7954 PVM pVM = pVCpu->CTX_SUFF(pVM);
7955 if (fIOString)
7956 {
7957 /* INS/OUTS - I/O String instruction. */
7958 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
7959        /** @todo For now we manually disassemble; later, optimize by getting the fields from
7960         *        the VMCS. */
7961 /** @todo VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
7962 * operand of the instruction. VMX_VMCS32_RO_EXIT_INSTR_INFO contains
7963 * segment prefix info. */
7964 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
7965 if (RT_SUCCESS(rc))
7966 {
7967 if (fIOWrite)
7968 {
7969 VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
7970 (DISCPUMODE)pDis->uAddrMode, cbSize);
7971 rc = VBOXSTRICTRC_VAL(rc2);
7972 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
7973 }
7974 else
7975 {
7976 VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
7977 (DISCPUMODE)pDis->uAddrMode, cbSize);
7978 rc = VBOXSTRICTRC_VAL(rc2);
7979 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
7980 }
7981 }
7982 else
7983 {
7984 AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
7985 rc = VINF_EM_RAW_EMULATE_INSTR;
7986 }
7987 }
7988 else
7989 {
7990 /* IN/OUT - I/O instruction. */
7991 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
7992 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(pVmxTransient->uExitQualification));
7993 if (fIOWrite)
7994 {
7995 VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbSize);
7996 rc = VBOXSTRICTRC_VAL(rc2);
7997 if (rc == VINF_IOM_R3_IOPORT_WRITE)
7998 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
7999 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
8000 }
8001 else
8002 {
8003 uint32_t u32Result = 0;
8004 VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbSize);
8005 rc = VBOXSTRICTRC_VAL(rc2);
8006 if (IOM_SUCCESS(rc))
8007 {
8008 /* Save result of I/O IN instr. in AL/AX/EAX. */
8009 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8010 }
8011 else if (rc == VINF_IOM_R3_IOPORT_READ)
8012 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8013 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
8014 }
8015 }
8016
8017 if (IOM_SUCCESS(rc))
8018 {
8019 pMixedCtx->rip += cbInstr;
8020 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8021 if (RT_LIKELY(rc == VINF_SUCCESS))
8022 {
8023 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); /* For DR7. */
8024 AssertRCReturn(rc, rc);
8025
8026 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
8027 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
8028 {
8029 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
8030 for (unsigned i = 0; i < 4; i++)
8031 {
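                    /*
                     * Breakpoint i fires for this access if it is enabled in DR7 (L or G bit), its R/W field
                     * selects I/O breakpoints (requires CR4.DE) and the accessed port overlaps [DRi, DRi + length).
                     */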
8032 uint32_t uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
8033 if ( ( uIOPort >= pMixedCtx->dr[i]
8034 && uIOPort < pMixedCtx->dr[i] + uBPLen)
8035 && (pMixedCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
8036 && (pMixedCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
8037 {
8038 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8039 uint64_t uDR6 = ASMGetDR6();
8040
8041 /* Clear all breakpoint status flags and set the one we just hit. */
8042 uDR6 &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
8043 uDR6 |= (uint64_t)RT_BIT(i);
8044
8045 /*
8046 * Note: AMD64 Architecture Programmer's Manual 13.1:
8047                         * Bits 15:13 of the DR6 register are never cleared by the processor and must
8048 * be cleared by software after the contents have been read.
8049 */
8050 ASMSetDR6(uDR6);
8051
8052 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8053 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8054
8055 /* Paranoia. */
8056 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits reserved. */
8057 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
8058 pMixedCtx->dr[7] |= 0x400; /* MB1. */
8059
8060 /* Resync DR7 */
8061 /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
8062 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8063
8064 /* Set #DB to be injected into the VM and continue guest execution. */
8065 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
8066 break;
8067 }
8068 }
8069 }
8070 }
8071 }
8072
8073#ifdef DEBUG
8074 if (rc == VINF_IOM_R3_IOPORT_READ)
8075 Assert(!fIOWrite);
8076 else if (rc == VINF_IOM_R3_IOPORT_WRITE)
8077 Assert(fIOWrite);
8078 else
8079 {
8080 AssertMsg( RT_FAILURE(rc)
8081 || rc == VINF_SUCCESS
8082 || rc == VINF_EM_RAW_EMULATE_INSTR
8083 || rc == VINF_EM_RAW_GUEST_TRAP
8084 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
8085 }
8086#endif
8087
8088 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
8089 return rc;
8090}
8091
8092
8093/**
8094 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8095 * VM-exit.
8096 */
8097HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8098{
8099 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8100
8101    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8102 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8103 AssertRCReturn(rc, rc);
8104 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
8105 {
8106 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
8107 AssertRCReturn(rc, rc);
8108 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
8109 {
8110 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
8111 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
8112 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
8113 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
8114 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
8115 {
8116 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
8117 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
8118
8119 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
8120 Assert(!pVCpu->hm.s.Event.fPending);
8121 pVCpu->hm.s.Event.fPending = true;
8122 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
8123 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
8124 AssertRCReturn(rc, rc);
8125 if (fErrorCodeValid)
8126 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
8127 else
8128 pVCpu->hm.s.Event.u32ErrCode = 0;
8129 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
8130 && uVector == X86_XCPT_PF)
8131 {
8132 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
8133 }
8134 Log(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
8135 }
8136 }
8137 }
8138 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8139 * emulation. */
8140 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8141 return VERR_EM_INTERPRETER;
8142}
8143
8144
8145/**
8146 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8147 */
8148HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8149{
8150 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8151 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG);
8152 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
8153 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
8154 AssertRCReturn(rc, rc);
8155 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
8156 return VINF_EM_DBG_STOP;
8157}
8158
8159
8160/**
8161 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8162 */
8163HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8164{
8165 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8166
8167 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8168 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8169 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8170 return VINF_SUCCESS;
8171 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8172 return rc;
8173
8174#if 0
8175 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
8176 * just sync the whole thing. */
8177 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8178#else
8179 /* Aggressive state sync. for now. */
8180 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8181 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8182 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8183#endif
8184 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8185 AssertRCReturn(rc, rc);
8186
8187    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8188 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
8189 switch (uAccessType)
8190 {
8191 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8192 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8193 {
8194 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
8195 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
8196 {
8197 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8198 }
8199
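            /*
             * Reconstruct the guest-physical address of the access from the page base of the guest's APIC-base
             * MSR plus the offset in the exit qualification, then let IOM's MMIO handler deal with it.
             */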
8200 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
8201 GCPhys &= PAGE_BASE_GC_MASK;
8202 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
8203 PVM pVM = pVCpu->CTX_SUFF(pVM);
8204 Log(("ApicAccess uAccessType=%#x GCPhys=%RGp Off=%#x\n", uAccessType, GCPhys,
8205 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
8206
8207 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
8208 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
8209 CPUMCTX2CORE(pMixedCtx), GCPhys);
8210 rc = VBOXSTRICTRC_VAL(rc2);
8211 Log(("ApicAccess rc=%d\n", rc));
8212 if ( rc == VINF_SUCCESS
8213 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8214 || rc == VERR_PAGE_NOT_PRESENT)
8215 {
8216 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8217 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8218 rc = VINF_SUCCESS;
8219 }
8220 break;
8221 }
8222
8223 default:
8224 Log(("ApicAccess uAccessType=%#x\n", uAccessType));
8225 rc = VINF_EM_RAW_EMULATE_INSTR;
8226 break;
8227 }
8228
8229 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
8230 return rc;
8231}
8232
8233
8234/**
8235 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8236 * VM-exit.
8237 */
8238HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8239{
8240 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8241
8242 /* We should -not- get this VM-exit if the guest is debugging. */
8243 if (CPUMIsGuestDebugStateActive(pVCpu))
8244 {
8245 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8246 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8247 }
8248
8249 int rc = VERR_INTERNAL_ERROR_5;
8250 if ( !DBGFIsStepping(pVCpu)
8251 && !CPUMIsHyperDebugStateActive(pVCpu))
8252 {
8253 /* Don't intercept MOV DRx. */
8254 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
8255 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
8256 AssertRCReturn(rc, rc);
8257
8258 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8259 PVM pVM = pVCpu->CTX_SUFF(pVM);
8260 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
8261 AssertRC(rc);
8262 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8263
8264#ifdef VBOX_WITH_STATISTICS
8265 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8266 AssertRCReturn(rc, rc);
8267 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8268 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8269 else
8270 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8271#endif
8272 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
8273 return VINF_SUCCESS;
8274 }
8275
8276 /*
8277 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
8278 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
8279 */
8280 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8281 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8282 AssertRCReturn(rc, rc);
8283
8284 PVM pVM = pVCpu->CTX_SUFF(pVM);
8285 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8286 {
8287 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8288 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
8289 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
8290 if (RT_SUCCESS(rc))
8291 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8292 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8293 }
8294 else
8295 {
8296 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8297 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
8298 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
8299 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8300 }
8301
8302 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8303 if (RT_SUCCESS(rc))
8304 {
8305 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8306 AssertRCReturn(rc2, rc2);
8307 }
8308 return rc;
8309}
8310
8311
8312/**
8313 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8314 * Conditional VM-exit.
8315 */
8316HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8317{
8318 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8319 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8320
8321 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8322 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8323 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8324 return VINF_SUCCESS;
8325 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8326 return rc;
8327
8328 RTGCPHYS GCPhys = 0;
8329 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8330
8331#if 0
8332 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8333#else
8334 /* Aggressive state sync. for now. */
8335 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8336 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8337 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8338#endif
8339 AssertRCReturn(rc, rc);
8340
8341 /*
8342 * If we succeed, resume guest execution.
8343     * If we fail to interpret the instruction because we couldn't get the guest physical address of the page
8344     * containing the instruction via the guest's page tables (we would invalidate the guest page in the host
8345     * TLB), resume execution anyway; the resulting guest page fault lets the guest handle this weird case.
8346     * See @bugref{6043}.
8347 */
8348 PVM pVM = pVCpu->CTX_SUFF(pVM);
8349 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
8350 rc = VBOXSTRICTRC_VAL(rc2);
8351    Log(("EPT misconfig at %RGp RIP=%#RGv rc=%d\n", GCPhys, pMixedCtx->rip, rc));
8352 if ( rc == VINF_SUCCESS
8353 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8354 || rc == VERR_PAGE_NOT_PRESENT)
8355 {
8356 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8357 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8358 return VINF_SUCCESS;
8359 }
8360 return rc;
8361}
8362
8363
8364/**
8365 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8366 * VM-exit.
8367 */
8368HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8369{
8370 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8371 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8372
8373 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8374 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8375 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8376 return VINF_SUCCESS;
8377 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8378 return rc;
8379
8380 RTGCPHYS GCPhys = 0;
8381 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8382 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8383#if 0
8384 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8385#else
8386 /* Aggressive state sync. for now. */
8387 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8388 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8389 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8390#endif
8391 AssertRCReturn(rc, rc);
8392
8393 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
8394 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RGv", pVmxTransient->uExitQualification));
8395
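    /*
     * Translate the EPT-violation exit qualification bits into a #PF-style error code so the nested-paging
     * handler (PGM) and TRPM can treat this like an ordinary page fault.
     */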
8396 RTGCUINT uErrorCode = 0;
8397 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
8398 uErrorCode |= X86_TRAP_PF_ID;
8399 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
8400 uErrorCode |= X86_TRAP_PF_RW;
8401 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
8402 uErrorCode |= X86_TRAP_PF_P;
8403
8404 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8405
8406    Log(("EPT violation %#x at %RGp ErrorCode %#x CS:EIP=%04x:%#RX64\n", (uint32_t)pVmxTransient->uExitQualification, GCPhys,
8407 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8408
8409 /* Handle the pagefault trap for the nested shadow table. */
8410 PVM pVM = pVCpu->CTX_SUFF(pVM);
8411 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
8412 TRPMResetTrap(pVCpu);
8413
8414 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8415 if ( rc == VINF_SUCCESS
8416 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8417 || rc == VERR_PAGE_NOT_PRESENT)
8418 {
8419        /* Successfully synced our shadow page tables or emulated an MMIO instruction. */
8420 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
8421 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8422 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8423 return VINF_SUCCESS;
8424 }
8425
8426    Log(("EPT return to ring-3 rc=%d\n", rc));
8427 return rc;
8428}
8429
8430
8431/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8432/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
8433/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8434/**
8435 * VM-exit exception handler for #MF (Math Fault: floating point exception).
8436 */
8437static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8438{
8439 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8440 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8441
8442 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8443 AssertRCReturn(rc, rc);
8444
8445 if (!(pMixedCtx->cr0 & X86_CR0_NE))
8446 {
8447 /* Old-style FPU error reporting needs some extra work. */
8448 /** @todo don't fall back to the recompiler, but do it manually. */
8449 return VERR_EM_INTERPRETER;
8450 }
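 /* Note: CR0.NE is set, i.e. native #MF error reporting is in effect, so the exception can simply be re-injected
    into the guest. */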
8451 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8452 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8453 return rc;
8454}
8455
8456
8457/**
8458 * VM-exit exception handler for #BP (Breakpoint exception).
8459 */
8460static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8461{
8462 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8463 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8464
8465 /** @todo Try to optimize this by not saving the entire guest state unless
8466 * really needed. */
8467 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8468 AssertRCReturn(rc, rc);
8469
8470 PVM pVM = pVCpu->CTX_SUFF(pVM);
8471 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8472 if (rc == VINF_EM_RAW_GUEST_TRAP)
8473 {
8474 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8475 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8476 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8477 AssertRCReturn(rc, rc);
8478
8479 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8480 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8481 }
8482
8483 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
8484 return rc;
8485}
8486
8487
8488/**
8489 * VM-exit exception handler for #DB (Debug exception).
8490 */
8491static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8492{
8493 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8494 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8495
8496 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8497 AssertRCReturn(rc, rc);
8498
8499 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
8500 uint64_t uDR6 = X86_DR6_INIT_VAL;
8501 uDR6 |= (pVmxTransient->uExitQualification
8502 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
8503 PVM pVM = pVCpu->CTX_SUFF(pVM);
8504 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
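 /* Note: VINF_EM_RAW_GUEST_TRAP indicates the #DB belongs to the guest (it is not a debugger event), so it is
    reflected below after the guest debug state has been updated. */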
8505 if (rc == VINF_EM_RAW_GUEST_TRAP)
8506 {
8507 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
8508 pMixedCtx->dr[6] = uDR6;
8509
8510 if (CPUMIsGuestDebugStateActive(pVCpu))
8511 ASMSetDR6(pMixedCtx->dr[6]);
8512
8513 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
8514
8515 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8516 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8517
8518 /* Paranoia. */
8519 pMixedCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
8520 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
8521 pMixedCtx->dr[7] |= 0x400; /* must be one */
8522
8523 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
8524 AssertRCReturn(rc, rc);
8525
8526 int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8527 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8528 rc2 |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8529 AssertRCReturn(rc2, rc2);
8530 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8531 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8532 rc = VINF_SUCCESS;
8533 }
8534
8535 return rc;
8536}
8537
8538
8539/**
8540 * VM-exit exception handler for #NM (Device-not-available exception: raised on FPU access
8541 * while CR0.TS or CR0.EM is set; handled here by lazily loading the guest-FPU state).
8542 */
8543static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8544{
8545 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8546
8547#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8548 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8549#endif
8550
8551 /* We require CR0 and EFER. EFER is always up-to-date. */
8552 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8553 AssertRCReturn(rc, rc);
8554
8555 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
8556 PVM pVM = pVCpu->CTX_SUFF(pVM);
8557 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8558 if (rc == VINF_SUCCESS)
8559 {
8560 Assert(CPUMIsGuestFPUStateActive(pVCpu));
8561 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8562 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
8563 return VINF_SUCCESS;
8564 }
8565
8566 /* Forward #NM to the guest. */
8567 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
8568 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8569 AssertRCReturn(rc, rc);
8570 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8571 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
8572 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
8573 return rc;
8574}
8575
8576
8577/**
8578 * VM-exit exception handler for #GP (General-protection exception).
8579 *
8580 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
8581 */
8582static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8583{
8584 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8585 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8586
8587 int rc = VERR_INTERNAL_ERROR_5;
8588 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8589 {
8590#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8591 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
8592 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8593 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8594 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8595 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8596 AssertRCReturn(rc, rc);
8597 Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RGv CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
8598 pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
8599 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8600 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8601 return rc;
8602#else
8603 /* We don't intercept #GP. */
8604 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
8605 return VERR_VMX_UNEXPECTED_EXCEPTION;
8606#endif
8607 }
8608
8609 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
8610 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
8611
8612 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
8613 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8614 AssertRCReturn(rc, rc);
8615
8616 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8617 uint32_t cbOp = 0;
8618 PVM pVM = pVCpu->CTX_SUFF(pVM);
8619 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
8620 if (RT_SUCCESS(rc))
8621 {
8622 rc = VINF_SUCCESS;
8623 Assert(cbOp == pDis->cbInstr);
8624 Log(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
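 /* Note: the instructions handled below are IOPL-sensitive or privileged in virtual-8086 mode and
    therefore fault with #GP; they are emulated here rather than reflected to the guest. */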
8625 switch (pDis->pCurInstr->uOpcode)
8626 {
8627 case OP_CLI:
8628 pMixedCtx->eflags.Bits.u1IF = 0;
8629 pMixedCtx->rip += pDis->cbInstr;
8630 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8631 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
8632 break;
8633
8634 case OP_STI:
8635 pMixedCtx->eflags.Bits.u1IF = 1;
8636 pMixedCtx->rip += pDis->cbInstr;
8637 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
8638 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
8639 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8640 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
8641 break;
8642
8643 case OP_HLT:
8644 rc = VINF_EM_HALT;
8645 pMixedCtx->rip += pDis->cbInstr;
8646 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8647 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8648 break;
8649
8650 case OP_POPF:
8651 {
8652 Log(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8653 uint32_t cbParm = 0;
8654 uint32_t uMask = 0;
8655 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8656 {
8657 cbParm = 4;
8658 uMask = 0xffffffff;
8659 }
8660 else
8661 {
8662 cbParm = 2;
8663 uMask = 0xffff;
8664 }
8665
8666 /* Get the stack pointer & pop the contents of the stack onto EFlags. */
8667 RTGCPTR GCPtrStack = 0;
8668 X86EFLAGS uEflags;
8669 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8670 &GCPtrStack);
8671 if (RT_SUCCESS(rc))
8672 {
8673 Assert(sizeof(uEflags.u32) >= cbParm);
8674 uEflags.u32 = 0;
8675 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
8676 }
8677 if (RT_FAILURE(rc))
8678 {
8679 rc = VERR_EM_INTERPRETER;
8680 break;
8681 }
8682 Log(("POPF %x -> %#RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
8683 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8684 | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
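 /* Note: only the flags POPF may architecturally modify (X86_EFL_POPF_BITS), further restricted to the
    operand size via uMask, are taken from the popped value. */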
8685 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
8686 pMixedCtx->eflags.Bits.u1RF = 0;
8687 pMixedCtx->esp += cbParm;
8688 pMixedCtx->esp &= uMask;
8689 pMixedCtx->rip += pDis->cbInstr;
8690 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8691 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
8692 break;
8693 }
8694
8695 case OP_PUSHF:
8696 {
8697 uint32_t cbParm = 0;
8698 uint32_t uMask = 0;
8699 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8700 {
8701 cbParm = 4;
8702 uMask = 0xffffffff;
8703 }
8704 else
8705 {
8706 cbParm = 2;
8707 uMask = 0xffff;
8708 }
8709
8710 /* Get the stack pointer & push the contents of eflags onto the stack. */
8711 RTGCPTR GCPtrStack = 0;
8712 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
8713 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
8714 if (RT_FAILURE(rc))
8715 {
8716 rc = VERR_EM_INTERPRETER;
8717 break;
8718 }
8719 X86EFLAGS uEflags;
8720 uEflags = pMixedCtx->eflags;
8721 /* The RF & VM bits are cleared in the EFLAGS image stored on the stack; see the Intel instruction reference for PUSHF. */
8722 uEflags.Bits.u1RF = 0;
8723 uEflags.Bits.u1VM = 0;
8724
8725 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
8726 if (RT_FAILURE(rc))
8727 {
8728 rc = VERR_EM_INTERPRETER;
8729 break;
8730 }
8731 Log(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
8732 pMixedCtx->esp -= cbParm;
8733 pMixedCtx->esp &= uMask;
8734 pMixedCtx->rip += pDis->cbInstr;
8735 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
8736 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
8737 break;
8738 }
8739
8740 case OP_IRET:
8741 {
8742 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
8743 * instruction reference. */
8744 RTGCPTR GCPtrStack = 0;
8745 uint32_t uMask = 0xffff;
8746 uint16_t aIretFrame[3];
8747 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
8748 {
8749 rc = VERR_EM_INTERPRETER;
8750 break;
8751 }
8752 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8753 &GCPtrStack);
8754 if (RT_SUCCESS(rc))
8755 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
8756 if (RT_FAILURE(rc))
8757 {
8758 rc = VERR_EM_INTERPRETER;
8759 break;
8760 }
8761 pMixedCtx->eip = 0;
8762 pMixedCtx->ip = aIretFrame[0];
8763 pMixedCtx->cs.Sel = aIretFrame[1];
8764 pMixedCtx->cs.ValidSel = aIretFrame[1];
8765 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
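 /* Real-mode segment base = selector * 16. */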
8766 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8767 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
8768 pMixedCtx->sp += sizeof(aIretFrame);
8769 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
8770 | HM_CHANGED_GUEST_RFLAGS;
8771 Log(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
8772 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
8773 break;
8774 }
8775
8776 case OP_INT:
8777 {
8778 uint16_t uVector = pDis->Param1.uValue & 0xff;
8779 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
8780 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8781 break;
8782 }
8783
8784 case OP_INTO:
8785 {
8786 if (pMixedCtx->eflags.Bits.u1OF)
8787 {
8788 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
8789 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8790 }
8791 break;
8792 }
8793
8794 default:
8795 {
8796 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
8797 EMCODETYPE_SUPERVISOR);
8798 rc = VBOXSTRICTRC_VAL(rc2);
8799 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
8800 Log(("#GP rc=%Rrc\n", rc));
8801 break;
8802 }
8803 }
8804 }
8805 else
8806 rc = VERR_EM_INTERPRETER;
8807
8808 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
8809 ("#GP Unexpected rc=%Rrc\n", rc));
8810 return rc;
8811}
8812
8813
8814/**
8815 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
8816 * the exception reported in the VMX transient structure back into the VM.
8817 *
8818 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
8819 * up-to-date.
8820 */
8821static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8822{
8823 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8824
8825 /* Re-inject the exception into the guest. This cannot be a double-fault condition, as that would already have been
8826 handled in hmR0VmxCheckExitDueToEventDelivery(). */
8827 int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8828 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8829 AssertRCReturn(rc, rc);
8830 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
8831 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8832 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8833 return VINF_SUCCESS;
8834}
8835
8836
8837/**
8838 * VM-exit exception handler for #PF (Page-fault exception).
8839 */
8840static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8841{
8842 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8843 PVM pVM = pVCpu->CTX_SUFF(pVM);
8844 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8845 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8846 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8847 AssertRCReturn(rc, rc);
8848
8849#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
8850 if (pVM->hm.s.fNestedPaging)
8851 {
8852 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
8853 {
8854 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
8855 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
8856 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8857 0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
8859 }
8860 else
8861 {
8862 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8863 pVCpu->hm.s.Event.fPending = false; /* A vectoring #PF. */
8864 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
8865 Log(("Pending #DF due to vectoring #PF. NP\n"));
8866 }
8867 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8868 return rc;
8869 }
8870#else
8871 Assert(!pVM->hm.s.fNestedPaging);
8872#endif
8873
8874#ifdef VBOX_HM_WITH_GUEST_PATCHING
8875 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8876 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8877 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8878 AssertRCReturn(rc, rc);
8879 /* Shortcut for APIC TPR access, only for 32-bit guests. */
8880 if ( pVM->hm.s.fTRPPatchingAllowed
8881 && pVM->hm.s.pGuestPatchMem
8882 && (pVmxTransient->uExitQualification & 0xfff) == 0x80 /* TPR offset */
8883 && !(pVmxTransient->uExitIntrErrorCode & X86_TRAP_PF_P) /* Page not present */
8884 && CPUMGetGuestCPL(pVCpu) == 0 /* Requires CR0, EFLAGS, segments. */
8885 && !CPUMIsGuestInLongModeEx(pMixedCtx) /* Requires EFER. */
8886 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
8887 {
8888 RTGCPHYS GCPhys;
8889 RTGCPHYS GCPhysApicBase = (pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK);
8890 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pVmxTransient->uExitQualification, NULL /* pfFlags */, &GCPhys);
8891 if ( rc == VINF_SUCCESS
8892 && GCPhys == GCPhysApicBase)
8893 {
8894 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8895 AssertRCReturn(rc, rc);
8896
8897 /* Only attempt to patch the instruction once. */
8898 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pMixedCtx->eip);
8899 if (!pPatch)
8900 return VINF_EM_HM_PATCH_TPR_INSTR;
8901 }
8902 }
8903#endif
8904
8905 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8906 AssertRCReturn(rc, rc);
8907
8908 Log(("#PF: cr2=%#RGv cs:rip=%#04x:%#RGv uErrCode %#RX32 cr3=%#RGv\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
8909 pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
8910
8911 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
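 /* Note: the page fault is asserted in TRPM so PGM (and any fallback to ring-3) sees a consistent trap state;
    it is reset again on every code path below. */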
8912 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
8913 (RTGCPTR)pVmxTransient->uExitQualification);
8914
8915 Log(("#PF: rc=%Rrc\n", rc));
8916 if (rc == VINF_SUCCESS)
8917 {
8918 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
8919 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
8920 * memory? We don't update the whole state here... */
8921 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8922 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8923 TRPMResetTrap(pVCpu);
8924 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8925 return rc;
8926 }
8927 else if (rc == VINF_EM_RAW_GUEST_TRAP)
8928 {
8929 if (!pVmxTransient->fVectoringPF)
8930 {
8931 /* It's a guest page fault and needs to be reflected to the guest. */
8932 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
8933 TRPMResetTrap(pVCpu);
8934 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
8935 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
8936 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8937 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
8938 }
8939 else
8940 {
8941 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8942 TRPMResetTrap(pVCpu);
8943 pVCpu->hm.s.Event.fPending = false; /* Clear the pending #PF to replace it with #DF. */
8944 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
8945 Log(("#PF: Pending #DF due to vectoring #PF\n"));
8946 }
8947
8948 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8949 return VINF_SUCCESS;
8950 }
8951
8952 TRPMResetTrap(pVCpu);
8953 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
8954 return rc;
8955}
8956