VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@ 45955

Last change on this file since 45955 was 45955, checked in by vboxsync, 12 years ago

VMM: Name fixes for non-static functions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 380.1 KB
Line 
1/* $Id: HMVMXR0.cpp 45955 2013-05-08 18:04:10Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HWVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#ifdef DEBUG_ramshankar
38#define HMVMX_SAVE_FULL_GUEST_STATE
39#define HMVMX_SYNC_FULL_GUEST_STATE
40#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
41#define HMVMX_ALWAYS_TRAP_PF
42#endif
43
44
45/*******************************************************************************
46* Defined Constants And Macros *
47*******************************************************************************/
48#define HMVMXHCUINTREG RTHCUINTREG
49#if defined(RT_ARCH_AMD64)
50# define HMVMX_IS_64BIT_HOST_MODE() (true)
51#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
52extern "C" uint32_t g_fVMXIs64bitHost;
53# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
54# undef HMVMXHCUINTREG
55# define HMVMXHCUINTREG uint64_t
56#else
57# define HMVMX_IS_64BIT_HOST_MODE() (false)
58#endif
59
60/** Use the function table. */
61#define HMVMX_USE_FUNCTION_TABLE
62
63/** This bit indicates the segment selector is unusable in VT-x. */
64#define HMVMX_SEL_UNUSABLE RT_BIT(16)
65
66/** Determine which tagged-TLB flush handler to use. */
67#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
68#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
69#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
70#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
71
72/** Updated-guest-state flags. */
73#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
74#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
75#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
76#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
77#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
78#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
79#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
80#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
81#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
82#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
83#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
84#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
85#define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12)
86#define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13)
87#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14)
88#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15)
89#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16)
90#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
91#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18)
92#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
93#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
94 | HMVMX_UPDATED_GUEST_RSP \
95 | HMVMX_UPDATED_GUEST_RFLAGS \
96 | HMVMX_UPDATED_GUEST_CR0 \
97 | HMVMX_UPDATED_GUEST_CR3 \
98 | HMVMX_UPDATED_GUEST_CR4 \
99 | HMVMX_UPDATED_GUEST_GDTR \
100 | HMVMX_UPDATED_GUEST_IDTR \
101 | HMVMX_UPDATED_GUEST_LDTR \
102 | HMVMX_UPDATED_GUEST_TR \
103 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
104 | HMVMX_UPDATED_GUEST_DEBUG \
105 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \
106 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \
107 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
108 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
109 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
110 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
111 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
112 | HMVMX_UPDATED_GUEST_APIC_STATE)
113
114/**
115 * Flags to skip redundant reads of some common VMCS fields that are not part of
116 * the guest-CPU state but are in the transient structure.
117 */
118#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
119#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
120#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
121#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
122#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
123#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
124
125/**
126 * Exception bitmap mask for real-mode guests (real-on-v86). We need to intercept all exceptions manually (except #PF).
127 * #NM is also handled spearetely, see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted even in real-mode if
128 * we have Nested Paging support.
129 */
130#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
131 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
132 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
133 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
134 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
135 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
136 | RT_BIT(X86_XCPT_XF))
137
138/**
139 * Exception bitmap mask for all contributory exceptions.
140 */
141#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
142 | RT_BIT(X86_XCPT_DE))
143
144/** Maximum VM-instruction error number. */
145#define HMVMX_INSTR_ERROR_MAX 28
146
147/** Profiling macro. */
148#ifdef HM_PROFILE_EXIT_DISPATCH
149# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
150# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
151#else
152# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
153# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
154#endif
155
156
157/*******************************************************************************
158* Structures and Typedefs *
159*******************************************************************************/
160/**
161 * A state structure for holding miscellaneous information across
162 * VMX non-root operation and restored after the transition.
163 */
164typedef struct VMXTRANSIENT
165{
166 /** The host's rflags/eflags. */
167 RTCCUINTREG uEFlags;
168#if HC_ARCH_BITS == 32
169 uint32_t u32Alignment0;
170#endif
171 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
172 uint64_t u64LStarMsr;
173 /** The guest's TPR value used for TPR shadowing. */
174 uint8_t u8GuestTpr;
175 /** Alignment. */
176 uint8_t abAlignment0[6];
177
178 /** The basic VM-exit reason. */
179 uint16_t uExitReason;
180 /** Alignment. */
181 uint16_t u16Alignment0;
182 /** The VM-exit interruption error code. */
183 uint32_t uExitIntrErrorCode;
184 /** The VM-exit exit qualification. */
185 RTGCUINTPTR uExitQualification;
186#if GC_ARCH_BITS == 32
187 /** Alignment. */
188 uint32_t u32Alignment1;
189#endif
190
191 /** The VM-exit interruption-information field. */
192 uint32_t uExitIntrInfo;
193 /** The VM-exit instruction-length field. */
194 uint32_t cbInstr;
195 /** Whether the VM-entry failed or not. */
196 bool fVMEntryFailed;
197 /** Alignment. */
198 uint8_t abAlignment1[5];
199
200 /** The VM-entry interruption-information field. */
201 uint32_t uEntryIntrInfo;
202 /** The VM-entry exception error code field. */
203 uint32_t uEntryXcptErrorCode;
204 /** The VM-entry instruction length field. */
205 uint32_t cbEntryInstr;
206
207 /** IDT-vectoring information field. */
208 uint32_t uIdtVectoringInfo;
209 /** IDT-vectoring error code. */
210 uint32_t uIdtVectoringErrorCode;
211
212 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
213 uint32_t fVmcsFieldsRead;
214 /** Whether TSC-offsetting should be setup before VM-entry. */
215 bool fUpdateTscOffsettingAndPreemptTimer;
216 /** Whether the VM-exit was caused by a page-fault during delivery of a
217 * contributary exception or a page-fault. */
218 bool fVectoringPF;
219} VMXTRANSIENT, *PVMXTRANSIENT;
220AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
221AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
222AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
223
224
225/**
226 * MSR-bitmap read permissions.
227 */
228typedef enum VMXMSREXITREAD
229{
230 /** Reading this MSR causes a VM-exit. */
231 VMXMSREXIT_INTERCEPT_READ = 0xb,
232 /** Reading this MSR does not cause a VM-exit. */
233 VMXMSREXIT_PASSTHRU_READ
234} VMXMSREXITREAD;
235
236/**
237 * MSR-bitmap write permissions.
238 */
239typedef enum VMXMSREXITWRITE
240{
241 /** Writing to this MSR causes a VM-exit. */
242 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
243 /** Writing to this MSR does not cause a VM-exit. */
244 VMXMSREXIT_PASSTHRU_WRITE
245} VMXMSREXITWRITE;
246
247
248/*******************************************************************************
249* Internal Functions *
250*******************************************************************************/
251static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
252static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
253 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
254#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
255static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
256#endif
257#ifndef HMVMX_USE_FUNCTION_TABLE
258DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
259#define HMVMX_EXIT_DECL static int
260#else
261#define HMVMX_EXIT_DECL static DECLCALLBACK(int)
262#endif
263
264HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
265HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
266HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
267HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
268HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
269HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
270HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
271HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
272HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
273HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
274HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
275HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
276HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
277HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
278HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
279HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
280HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
281HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
282HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
283HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
284HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
285HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
286HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
287HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
288HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
289HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
290HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
291HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
292HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
293HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
294HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
295HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
296HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
297HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
298HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
299HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
300HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
301HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
302HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
303HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
304HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
305HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
306HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
307HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
308
309static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
310static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
311static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
312static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
313static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
314static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
315static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321#ifdef HMVMX_USE_FUNCTION_TABLE
322/**
323 * VM-exit handler.
324 *
325 * @returns VBox status code.
326 * @param pVCpu Pointer to the VMCPU.
327 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
328 * out-of-sync. Make sure to update the required
329 * fields before using them.
330 * @param pVmxTransient Pointer to the VMX-transient structure.
331 */
332typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
333/** Pointer to VM-exit handler. */
334typedef FNVMEXITHANDLER *const PFNVMEXITHANDLER;
335
336/**
337 * VMX_EXIT dispatch table.
338 */
339static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
340{
341 /* 00 VMX_EXIT_XCPT_NMI */ hmR0VmxExitXcptNmi,
342 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
343 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
344 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
345 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
346 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
347 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
348 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
349 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
350 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
351 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
352 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
353 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
354 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
355 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
356 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
357 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
358 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
359 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitSetPendingXcptUD,
360 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
361 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
362 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
363 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
364 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
365 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
366 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
367 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
368 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
369 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
370 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
371 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
372 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
373 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
374 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
375 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
376 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
377 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
378 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
379 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
380 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
381 /* 40 UNDEFINED */ hmR0VmxExitPause,
382 /* 41 VMX_EXIT_PAUSE */ hmR0VmxExitErrMachineCheck,
383 /* 42 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrUndefined,
384 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
385 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
386 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
387 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
388 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
389 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
390 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
391 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
392 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
393 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
394 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
395 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
396 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
397 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
398 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
399 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
400 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
401};
402#endif /* HMVMX_USE_FUNCTION_TABLE */
403
404#ifdef VBOX_STRICT
405static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
406{
407 /* 0 */ "(Not Used)",
408 /* 1 */ "VMCALL executed in VMX root operation.",
409 /* 2 */ "VMCLEAR with invalid physical address.",
410 /* 3 */ "VMCLEAR with VMXON pointer.",
411 /* 4 */ "VMLAUNCH with non-clear VMCS.",
412 /* 5 */ "VMRESUME with non-launched VMCS.",
413 /* 6 */ "VMRESUME after VMXOFF",
414 /* 7 */ "VM entry with invalid control fields.",
415 /* 8 */ "VM entry with invalid host state fields.",
416 /* 9 */ "VMPTRLD with invalid physical address.",
417 /* 10 */ "VMPTRLD with VMXON pointer.",
418 /* 11 */ "VMPTRLD with incorrect revision identifier.",
419 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
420 /* 13 */ "VMWRITE to read-only VMCS component.",
421 /* 14 */ "(Not Used)",
422 /* 15 */ "VMXON executed in VMX root operation.",
423 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
424 /* 17 */ "VM entry with non-launched executing VMCS.",
425 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
426 /* 19 */ "VMCALL with non-clear VMCS.",
427 /* 20 */ "VMCALL with invalid VM-exit control fields.",
428 /* 21 */ "(Not Used)",
429 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
430 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
431 /* 24 */ "VMCALL with invalid SMM-monitor features.",
432 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
433 /* 26 */ "VM entry with events blocked by MOV SS.",
434 /* 27 */ "(Not Used)",
435 /* 28 */ "Invalid operand to INVEPT/INVVPID."
436};
437#endif /* VBOX_STRICT */
438
439
440
441/**
442 * Updates the VM's last error record. If there was a VMX instruction error,
443 * reads the error data from the VMCS and updates VCPU's last error record as
444 * well.
445 *
446 * @param pVM Pointer to the VM.
447 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
448 * VERR_VMX_UNABLE_TO_START_VM or
449 * VERR_VMX_INVALID_VMCS_FIELD).
450 * @param rc The error code.
451 */
452static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
453{
454 AssertPtr(pVM);
455 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
456 || rc == VERR_VMX_UNABLE_TO_START_VM)
457 {
458 AssertPtrReturnVoid(pVCpu);
459 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
460 }
461 pVM->hm.s.lLastError = rc;
462}
463
464
465/**
466 * Reads the VM-entry interruption-information field from the VMCS into the VMX
467 * transient structure.
468 *
469 * @returns VBox status code.
470 * @param pVmxTransient Pointer to the VMX transient structure.
471 *
472 * @remarks No-long-jump zone!!!
473 */
474DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
475{
476 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
477 AssertRCReturn(rc, rc);
478 return VINF_SUCCESS;
479}
480
481
482/**
483 * Reads the VM-entry exception error code field from the VMCS into
484 * the VMX transient structure.
485 *
486 * @returns VBox status code.
487 * @param pVmxTransient Pointer to the VMX transient structure.
488 *
489 * @remarks No-long-jump zone!!!
490 */
491DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
492{
493 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
494 AssertRCReturn(rc, rc);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Reads the VM-entry exception error code field from the VMCS into
501 * the VMX transient structure.
502 *
503 * @returns VBox status code.
504 * @param pVCpu Pointer to the VMCPU.
505 * @param pVmxTransient Pointer to the VMX transient structure.
506 *
507 * @remarks No-long-jump zone!!!
508 */
509DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
510{
511 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
512 AssertRCReturn(rc, rc);
513 return VINF_SUCCESS;
514}
515
516
517/**
518 * Reads the VM-exit interruption-information field from the VMCS into the VMX
519 * transient structure.
520 *
521 * @returns VBox status code.
522 * @param pVCpu Pointer to the VMCPU.
523 * @param pVmxTransient Pointer to the VMX transient structure.
524 */
525DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
526{
527 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
528 {
529 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
530 AssertRCReturn(rc, rc);
531 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
532 }
533 return VINF_SUCCESS;
534}
535
536
537/**
538 * Reads the VM-exit interruption error code from the VMCS into the VMX
539 * transient structure.
540 *
541 * @returns VBox status code.
542 * @param pVCpu Pointer to the VMCPU.
543 * @param pVmxTransient Pointer to the VMX transient structure.
544 */
545DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
546{
547 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
548 {
549 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
550 AssertRCReturn(rc, rc);
551 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
552 }
553 return VINF_SUCCESS;
554}
555
556
557/**
558 * Reads the VM-exit instruction length field from the VMCS into the VMX
559 * transient structure.
560 *
561 * @returns VBox status code.
562 * @param pVCpu Pointer to the VMCPU.
563 * @param pVmxTransient Pointer to the VMX transient structure.
564 */
565DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
566{
567 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
568 {
569 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
570 AssertRCReturn(rc, rc);
571 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
572 }
573 return VINF_SUCCESS;
574}
575
576
577/**
578 * Reads the exit qualification from the VMCS into the VMX transient structure.
579 *
580 * @returns VBox status code.
581 * @param pVCpu Pointer to the VMCPU.
582 * @param pVmxTransient Pointer to the VMX transient structure.
583 */
584DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
585{
586 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
587 {
588 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
589 AssertRCReturn(rc, rc);
590 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
591 }
592 return VINF_SUCCESS;
593}
594
595
596/**
597 * Reads the IDT-vectoring information field from the VMCS into the VMX
598 * transient structure.
599 *
600 * @returns VBox status code.
601 * @param pVmxTransient Pointer to the VMX transient structure.
602 *
603 * @remarks No-long-jump zone!!!
604 */
605DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
606{
607 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
608 {
609 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
610 AssertRCReturn(rc, rc);
611 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
612 }
613 return VINF_SUCCESS;
614}
615
616
617/**
618 * Reads the IDT-vectoring error code from the VMCS into the VMX
619 * transient structure.
620 *
621 * @returns VBox status code.
622 * @param pVmxTransient Pointer to the VMX transient structure.
623 */
624DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
625{
626 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
627 {
628 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
629 AssertRCReturn(rc, rc);
630 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
631 }
632 return VINF_SUCCESS;
633}
634
635
636/**
637 * Enters VMX root mode operation on the current CPU.
638 *
639 * @returns VBox status code.
640 * @param pVM Pointer to the VM (optional, can be NULL, after
641 * a resume).
642 * @param HCPhysCpuPage Physical address of the VMXON region.
643 * @param pvCpuPage Pointer to the VMXON region.
644 */
645static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
646{
647 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
648 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
649 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
650
651 if (pVM)
652 {
653 /* Write the VMCS revision dword to the VMXON region. */
654 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
655 }
656
657 /* Enable the VMX bit in CR4 if necessary. */
658 RTCCUINTREG uCr4 = ASMGetCR4();
659 if (!(uCr4 & X86_CR4_VMXE))
660 ASMSetCR4(uCr4 | X86_CR4_VMXE);
661
662 /* Enter VMX root mode. */
663 int rc = VMXEnable(HCPhysCpuPage);
664 if (RT_FAILURE(rc))
665 ASMSetCR4(uCr4);
666
667 return rc;
668}
669
670
671/**
672 * Exits VMX root mode operation on the current CPU.
673 *
674 * @returns VBox status code.
675 */
676static int hmR0VmxLeaveRootMode(void)
677{
678 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
679
680 /* If we're for some reason not in VMX root mode, then don't leave it. */
681 if (ASMGetCR4() & X86_CR4_VMXE)
682 {
683 /* Exit VMX root mode and clear the VMX bit in CR4 */
684 VMXDisable();
685 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
686 return VINF_SUCCESS;
687 }
688
689 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
690}
691
692
693/**
694 * Allocates and maps one physically contiguous page. The allocated page is
695 * zero'd out. (Used by various VT-x structures).
696 *
697 * @returns IPRT status code.
698 * @param pMemObj Pointer to the ring-0 memory object.
699 * @param ppVirt Where to store the virtual address of the
700 * allocation.
701 * @param pPhys Where to store the physical address of the
702 * allocation.
703 */
704DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
705{
706 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
707 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
708 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
709
710 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
711 if (RT_FAILURE(rc))
712 return rc;
713 *ppVirt = RTR0MemObjAddress(*pMemObj);
714 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
715 ASMMemZero32(*ppVirt, PAGE_SIZE);
716 return VINF_SUCCESS;
717}
718
719
720/**
721 * Frees and unmaps an allocated physical page.
722 *
723 * @param pMemObj Pointer to the ring-0 memory object.
724 * @param ppVirt Where to re-initialize the virtual address of
725 * allocation as 0.
726 * @param pHCPhys Where to re-initialize the physical address of the
727 * allocation as 0.
728 */
729DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
730{
731 AssertPtr(pMemObj);
732 AssertPtr(ppVirt);
733 AssertPtr(pHCPhys);
734 if (*pMemObj != NIL_RTR0MEMOBJ)
735 {
736 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
737 AssertRC(rc);
738 *pMemObj = NIL_RTR0MEMOBJ;
739 *ppVirt = 0;
740 *pHCPhys = 0;
741 }
742}
743
744
745/**
746 * Worker function to free VT-x related structures.
747 *
748 * @returns IPRT status code.
749 * @param pVM Pointer to the VM.
750 */
751static void hmR0VmxStructsFree(PVM pVM)
752{
753 for (VMCPUID i = 0; i < pVM->cCpus; i++)
754 {
755 PVMCPU pVCpu = &pVM->aCpus[i];
756 AssertPtr(pVCpu);
757
758#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
759 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
760 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
761#endif
762
763 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
764 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
765
766 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
767 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
768 }
769
770 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
771#ifdef VBOX_WITH_CRASHDUMP_MAGIC
772 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
773#endif
774}
775
776
777/**
778 * Worker function to allocate VT-x related VM structures.
779 *
780 * @returns IPRT status code.
781 * @param pVM Pointer to the VM.
782 */
783static int hmR0VmxStructsAlloc(PVM pVM)
784{
785 /*
786 * Initialize members up-front so we can cleanup properly on allocation failure.
787 */
788#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
789 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
790 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
791 pVM->hm.s.vmx.HCPhys##a_Name = 0;
792
793#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
794 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
795 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
796 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
797
798#ifdef VBOX_WITH_CRASHDUMP_MAGIC
799 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
800#endif
801 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
802
803 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
804 for (VMCPUID i = 0; i < pVM->cCpus; i++)
805 {
806 PVMCPU pVCpu = &pVM->aCpus[i];
807 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
808 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
809 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
810#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
811 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
812 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
813#endif
814 }
815#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
816#undef VMXLOCAL_INIT_VM_MEMOBJ
817
818 /*
819 * Allocate all the VT-x structures.
820 */
821 int rc = VINF_SUCCESS;
822#ifdef VBOX_WITH_CRASHDUMP_MAGIC
823 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
824 if (RT_FAILURE(rc))
825 goto cleanup;
826 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
827 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
828#endif
829
830 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
831 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
832 {
833 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
834 &pVM->hm.s.vmx.HCPhysApicAccess);
835 if (RT_FAILURE(rc))
836 goto cleanup;
837 }
838
839 /*
840 * Initialize per-VCPU VT-x structures.
841 */
842 for (VMCPUID i =0; i < pVM->cCpus; i++)
843 {
844 PVMCPU pVCpu = &pVM->aCpus[i];
845 AssertPtr(pVCpu);
846
847 /* Allocate the VM control structure (VMCS). */
848 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
849 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
850 if (RT_FAILURE(rc))
851 goto cleanup;
852
853 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
854 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
855 {
856 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
857 &pVCpu->hm.s.vmx.HCPhysVirtApic);
858 if (RT_FAILURE(rc))
859 goto cleanup;
860 }
861
862 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
863 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
864 {
865 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
866 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
867 if (RT_FAILURE(rc))
868 goto cleanup;
869 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
870 }
871
872#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
873 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
874 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
875 if (RT_FAILURE(rc))
876 goto cleanup;
877
878 /* Allocate the VM-exit MSR-load page for the host MSRs. */
879 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
880 if (RT_FAILURE(rc))
881 goto cleanup;
882#endif
883 }
884
885 return VINF_SUCCESS;
886
887cleanup:
888 hmR0VmxStructsFree(pVM);
889 return rc;
890}
891
892
893/**
894 * Does global VT-x initialization (called during module initialization).
895 *
896 * @returns VBox status code.
897 */
898VMMR0DECL(int) VMXR0GlobalInit(void)
899{
900#ifdef HMVMX_USE_FUNCTION_TABLE
901 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
902# ifdef VBOX_STRICT
903 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
904 Assert(g_apfnVMExitHandlers[i]);
905# endif
906#endif
907 return VINF_SUCCESS;
908}
909
910
911/**
912 * Does global VT-x termination (called during module termination).
913 */
914VMMR0DECL(void) VMXR0GlobalTerm()
915{
916 /* Nothing to do currently. */
917}
918
919
920/**
921 * Sets up and activates VT-x on the current CPU.
922 *
923 * @returns VBox status code.
924 * @param pCpu Pointer to the global CPU info struct.
925 * @param pVM Pointer to the VM (can be NULL after a host resume
926 * operation).
927 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
928 * fEnabledByHost is true).
929 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
930 * @a fEnabledByHost is true).
931 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
932 * enable VT-x/AMD-V on the host.
933 */
934VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
935{
936 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
937 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
938
939 if (!fEnabledByHost)
940 {
941 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
942 if (RT_FAILURE(rc))
943 return rc;
944 }
945
946 /*
947 * Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
948 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
949 * each time while reusing a VPID after hitting the MaxASID limit once.
950 */
951 if ( pVM
952 && pVM->hm.s.vmx.fVpid
953 && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
954 {
955 hmR0VmxFlushVpid(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
956 pCpu->fFlushAsidBeforeUse = false;
957 }
958 else
959 pCpu->fFlushAsidBeforeUse = true;
960
961 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
962 ++pCpu->cTlbFlushes;
963
964 return VINF_SUCCESS;
965}
966
967
968/**
969 * Deactivates VT-x on the current CPU.
970 *
971 * @returns VBox status code.
972 * @param pCpu Pointer to the global CPU info struct.
973 * @param pvCpuPage Pointer to the VMXON region.
974 * @param HCPhysCpuPage Physical address of the VMXON region.
975 */
976VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
977{
978 NOREF(pCpu);
979 NOREF(pvCpuPage);
980 NOREF(HCPhysCpuPage);
981
982 return hmR0VmxLeaveRootMode();
983}
984
985
986/**
987 * Sets the permission bits for the specified MSR in the MSR bitmap.
988 *
989 * @param pVCpu Pointer to the VMCPU.
990 * @param uMSR The MSR value.
991 * @param enmRead Whether reading this MSR causes a VM-exit.
992 * @param enmWrite Whether writing this MSR causes a VM-exit.
993 */
994static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
995{
996 int32_t iBit;
997 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
998
999 /*
1000 * Layout:
1001 * 0x000 - 0x3ff - Low MSR read bits
1002 * 0x400 - 0x7ff - High MSR read bits
1003 * 0x800 - 0xbff - Low MSR write bits
1004 * 0xc00 - 0xfff - High MSR write bits
1005 */
1006 if (uMsr <= 0x00001FFF)
1007 iBit = uMsr;
1008 else if ( uMsr >= 0xC0000000
1009 && uMsr <= 0xC0001FFF)
1010 {
1011 iBit = (uMsr - 0xC0000000);
1012 pbMsrBitmap += 0x400;
1013 }
1014 else
1015 {
1016 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1017 return;
1018 }
1019
1020 Assert(iBit <= 0x1fff);
1021 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1022 ASMBitSet(pbMsrBitmap, iBit);
1023 else
1024 ASMBitClear(pbMsrBitmap, iBit);
1025
1026 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1027 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1028 else
1029 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1030}
1031
1032
1033/**
1034 * Flushes the TLB using EPT.
1035 *
1036 * @returns VBox status code.
1037 * @param pVM Pointer to the VM.
1038 * @param pVCpu Pointer to the VMCPU.
1039 * @param enmFlush Type of flush.
1040 */
1041static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
1042{
1043 AssertPtr(pVM);
1044 Assert(pVM->hm.s.fNestedPaging);
1045
1046 LogFlowFunc(("pVM=%p pVCpu=%p enmFlush=%d\n", pVM, pVCpu, enmFlush));
1047
1048 uint64_t descriptor[2];
1049 descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1050 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1051
1052 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1053 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu->hm.s.vmx.HCPhysEPTP, rc));
1054 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1055}
1056
1057
1058/**
1059 * Flushes the TLB using VPID.
1060 *
1061 * @returns VBox status code.
1062 * @param pVM Pointer to the VM.
1063 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1064 * enmFlush).
1065 * @param enmFlush Type of flush.
1066 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1067 * on @a enmFlush).
1068 */
1069static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1070{
1071 AssertPtr(pVM);
1072 Assert(pVM->hm.s.vmx.fVpid);
1073
1074 uint64_t descriptor[2];
1075 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1076 {
1077 descriptor[0] = 0;
1078 descriptor[1] = 0;
1079 }
1080 else
1081 {
1082 AssertPtr(pVCpu);
1083 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1084 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1085 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1086 descriptor[1] = GCPtr;
1087 }
1088
1089 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1090 AssertMsg(rc == VINF_SUCCESS,
1091 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1092 if ( RT_SUCCESS(rc)
1093 && pVCpu)
1094 {
1095 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1096 }
1097}
1098
1099
1100/**
1101 * Invalidates a guest page by guest virtual address. Only relevant for
1102 * EPT/VPID, otherwise there is nothing really to invalidate.
1103 *
1104 * @returns VBox status code.
1105 * @param pVM Pointer to the VM.
1106 * @param pVCpu Pointer to the VMCPU.
1107 * @param GCVirt Guest virtual address of the page to invalidate.
1108 */
1109VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1110{
1111 AssertPtr(pVM);
1112 AssertPtr(pVCpu);
1113 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1114
1115 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1116 if (!fFlushPending)
1117 {
1118 /*
1119 * We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case
1120 * See @bugref{6043} and @bugref{6177}.
1121 *
1122 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1123 * function maybe called in a loop with individual addresses.
1124 */
1125 if (pVM->hm.s.vmx.fVpid)
1126 {
1127 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1128 {
1129 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1130 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1131 }
1132 else
1133 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1134 }
1135 else if (pVM->hm.s.fNestedPaging)
1136 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1137 }
1138
1139 return VINF_SUCCESS;
1140}
1141
1142
1143/**
1144 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1145 * otherwise there is nothing really to invalidate.
1146 *
1147 * @returns VBox status code.
1148 * @param pVM Pointer to the VM.
1149 * @param pVCpu Pointer to the VMCPU.
1150 * @param GCPhys Guest physical address of the page to invalidate.
1151 */
1152VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1153{
1154 LogFlowFunc(("%RGp\n", GCPhys));
1155
1156 /*
1157 * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes
1158 * by EPT not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
1159 * This function might be called in a loop.
1160 */
1161 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1162 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1163 return VINF_SUCCESS;
1164}
1165
1166
1167/**
1168 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1169 * case where neither EPT nor VPID is supported by the CPU.
1170 *
1171 * @param pVM Pointer to the VM.
1172 * @param pVCpu Pointer to the VMCPU.
1173 *
1174 * @remarks Called with interrupts disabled.
1175 */
1176static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1177{
1178 NOREF(pVM);
1179 AssertPtr(pVCpu);
1180 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1181 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1182
1183 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1184 AssertPtr(pCpu);
1185
1186 pVCpu->hm.s.TlbShootdown.cPages = 0;
1187 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1188 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1189 pVCpu->hm.s.fForceTLBFlush = false;
1190 return;
1191}
1192
1193
1194/**
1195 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1196 *
1197 * @param pVM Pointer to the VM.
1198 * @param pVCpu Pointer to the VMCPU.
1199 * @remarks All references to "ASID" in this function pertains to "VPID" in
1200 * Intel's nomenclature. The reason is, to avoid confusion in compare
1201 * statements since the host-CPU copies are named "ASID".
1202 *
1203 * @remarks Called with interrupts disabled.
1204 */
1205static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1206{
1207 AssertPtr(pVM);
1208 AssertPtr(pVCpu);
1209 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1210 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1211 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1212
1213 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1214 AssertPtr(pCpu);
1215
1216 /*
1217 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1218 * This can happen both for start & resume due to long jumps back to ring-3.
1219 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1220 * or the host Cpu is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1221 */
1222 bool fNewASID = false;
1223 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1224 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1225 {
1226 pVCpu->hm.s.fForceTLBFlush = true;
1227 fNewASID = true;
1228 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1229 }
1230
1231 /*
1232 * Check for explicit TLB shootdowns.
1233 */
1234 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1235 {
1236 pVCpu->hm.s.fForceTLBFlush = true;
1237 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1238 }
1239
1240 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1241 if (pVCpu->hm.s.fForceTLBFlush)
1242 {
1243 if (fNewASID)
1244 {
1245 ++pCpu->uCurrentAsid;
1246 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1247 {
1248 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1249 pCpu->cTlbFlushes++;
1250 pCpu->fFlushAsidBeforeUse = true;
1251 }
1252
1253 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1254 if (pCpu->fFlushAsidBeforeUse)
1255 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1256 }
1257 else
1258 {
1259 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1260 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
1261 else
1262 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1263 }
1264
1265 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1266 pVCpu->hm.s.fForceTLBFlush = false;
1267 }
1268 else
1269 {
1270 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1271 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1272 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1273 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1274
1275 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1276 * not be executed. See hmQueueInvlPage() where it is commented
1277 * out. Support individual entry flushing someday. */
1278 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1279 {
1280 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1281
1282 /*
1283 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1284 * as supported by the CPU.
1285 */
1286 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1287 {
1288 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1289 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1290 }
1291 else
1292 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1293 }
1294 else
1295 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1296 }
1297 pVCpu->hm.s.TlbShootdown.cPages = 0;
1298 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1299
1300 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1301 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1302 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1303 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1304 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1305 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1306
1307 /* Update VMCS with the VPID. */
1308 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1309 AssertRC(rc);
1310}
1311
1312
1313/**
1314 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1315 *
1316 * @returns VBox status code.
1317 * @param pVM Pointer to the VM.
1318 * @param pVCpu Pointer to the VMCPU.
1319 *
1320 * @remarks Called with interrupts disabled.
1321 */
1322static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1323{
1324 AssertPtr(pVM);
1325 AssertPtr(pVCpu);
1326 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1327 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1328
1329 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1330 AssertPtr(pCpu);
1331
1332 /*
1333 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1334 * This can happen both for start & resume due to long jumps back to ring-3.
1335 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1336 */
1337 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1338 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1339 {
1340 pVCpu->hm.s.fForceTLBFlush = true;
1341 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1342 }
1343
1344 /* Check for explicit TLB shootdown flushes. */
1345 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1346 {
1347 pVCpu->hm.s.fForceTLBFlush = true;
1348 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1349 }
1350
1351 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1352 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1353
1354 if (pVCpu->hm.s.fForceTLBFlush)
1355 {
1356 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1357 pVCpu->hm.s.fForceTLBFlush = false;
1358 }
1359 else
1360 {
1361 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1362 * not be executed. See hmQueueInvlPage() where it is commented
1363 * out. Support individual entry flushing someday. */
1364 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1365 {
1366 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1367 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1368 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1369 }
1370 else
1371 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1372 }
1373
1374 pVCpu->hm.s.TlbShootdown.cPages = 0;
1375 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1376}
1377
1378
1379/**
1380 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1381 *
1382 * @returns VBox status code.
1383 * @param pVM Pointer to the VM.
1384 * @param pVCpu Pointer to the VMCPU.
1385 *
1386 * @remarks Called with interrupts disabled.
1387 */
1388static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1389{
1390 AssertPtr(pVM);
1391 AssertPtr(pVCpu);
1392 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1393 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1394
1395 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1396
1397 /*
1398 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1399 * This can happen both for start & resume due to long jumps back to ring-3.
1400 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1401 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1402 */
1403 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1404 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1405 {
1406 pVCpu->hm.s.fForceTLBFlush = true;
1407 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1408 }
1409
1410 /* Check for explicit TLB shootdown flushes. */
1411 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1412 {
1413 /*
1414 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1415 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1416 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
1417 */
1418 pVCpu->hm.s.fForceTLBFlush = true;
1419 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1420 }
1421
1422 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1423 if (pVCpu->hm.s.fForceTLBFlush)
1424 {
1425 ++pCpu->uCurrentAsid;
1426 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1427 {
1428 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1429 pCpu->fFlushAsidBeforeUse = true;
1430 pCpu->cTlbFlushes++;
1431 }
1432
1433 pVCpu->hm.s.fForceTLBFlush = false;
1434 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1435 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1436 if (pCpu->fFlushAsidBeforeUse)
1437 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1438 }
1439 else
1440 {
1441 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1442 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1443 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1444 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1445
1446 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1447 * not be executed. See hmQueueInvlPage() where it is commented
1448 * out. Support individual entry flushing someday. */
1449 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1450 {
1451 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
1452 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1453 {
1454 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1455 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1456 }
1457 else
1458 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1459 }
1460 else
1461 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1462 }
1463
1464 pVCpu->hm.s.TlbShootdown.cPages = 0;
1465 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1466
1467 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1468 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1469 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1470 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1471 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1472 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1473
1474 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1475 AssertRC(rc);
1476}
1477
1478
1479/**
1480 * Flushes the guest TLB entry based on CPU capabilities.
1481 *
1482 * @param pVCpu Pointer to the VMCPU.
1483 */
1484DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1485{
1486 PVM pVM = pVCpu->CTX_SUFF(pVM);
1487 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1488 {
1489 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1490 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1491 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1492 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1493 default:
1494 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1495 break;
1496 }
1497}
1498
1499
1500/**
1501 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1502 * TLB entries from the host TLB before VM-entry.
1503 *
1504 * @returns VBox status code.
1505 * @param pVM Pointer to the VM.
1506 */
1507static int hmR0VmxSetupTaggedTlb(PVM pVM)
1508{
1509 /*
1510 * Determine optimal flush type for nested paging.
1511 * We cannot ignore EPT if no suitable flush-types is supported by the CPU as we've already setup unrestricted
1512 * guest execution (see hmR3InitFinalizeR0()).
1513 */
1514 if (pVM->hm.s.fNestedPaging)
1515 {
1516 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1517 {
1518 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1519 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1520 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1521 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1522 else
1523 {
1524 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
1525 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1526 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1527 }
1528
1529 /* Make sure the write-back cacheable memory type for EPT is supported. */
1530 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1531 {
1532 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1533 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1534 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1535 }
1536 }
1537 else
1538 {
1539 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1540 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1541 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1542 }
1543 }
1544
1545 /*
1546 * Determine optimal flush type for VPID.
1547 */
1548 if (pVM->hm.s.vmx.fVpid)
1549 {
1550 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1551 {
1552 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1553 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1554 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1555 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1556 else
1557 {
1558 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
1559 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1560 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1561 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1562 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1563 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1564 pVM->hm.s.vmx.fVpid = false;
1565 }
1566 }
1567 else
1568 {
1569 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1570 Log(("hmR0VmxSetupTaggedTlb: VPID supported without INVEPT support. Ignoring VPID.\n"));
1571 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1572 pVM->hm.s.vmx.fVpid = false;
1573 }
1574 }
1575
1576 /*
1577 * Setup the handler for flushing tagged-TLBs.
1578 */
1579 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1580 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1581 else if (pVM->hm.s.fNestedPaging)
1582 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1583 else if (pVM->hm.s.vmx.fVpid)
1584 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1585 else
1586 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1587 return VINF_SUCCESS;
1588}
1589
1590
1591/**
1592 * Sets up pin-based VM-execution controls in the VMCS.
1593 *
1594 * @returns VBox status code.
1595 * @param pVM Pointer to the VM.
1596 * @param pVCpu Pointer to the VMCPU.
1597 */
1598static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1599{
1600 AssertPtr(pVM);
1601 AssertPtr(pVCpu);
1602
1603 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1604 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1605
1606 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts causes a VM-exits. */
1607 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts causes a VM-exit. */
1608 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
1609
1610 /* Enable the VMX preemption timer. */
1611 if (pVM->hm.s.vmx.fUsePreemptTimer)
1612 {
1613 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
1614 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
1615 }
1616
1617 if ((val & zap) != val)
1618 {
1619 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1620 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1621 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1622 }
1623
1624 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
1625 AssertRCReturn(rc, rc);
1626
1627 /* Update VCPU with the currently set pin-based VM-execution controls. */
1628 pVCpu->hm.s.vmx.u32PinCtls = val;
1629 return rc;
1630}
1631
1632
1633/**
1634 * Sets up processor-based VM-execution controls in the VMCS.
1635 *
1636 * @returns VBox status code.
1637 * @param pVM Pointer to the VM.
1638 * @param pVMCPU Pointer to the VMCPU.
1639 */
1640static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1641{
1642 AssertPtr(pVM);
1643 AssertPtr(pVCpu);
1644
1645 int rc = VERR_INTERNAL_ERROR_5;
1646 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1647 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1648
1649 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
1650 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1651 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1652 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1653 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1654 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1655 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1656
1657 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */
1658 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
1659 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
1660 {
1661 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
1662 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1663 }
1664
1665 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1666 if (!pVM->hm.s.fNestedPaging)
1667 {
1668 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1669 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
1670 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
1671 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
1672 }
1673
1674 /* Use TPR shadowing if supported by the CPU. */
1675 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
1676 {
1677 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1678 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1679 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1680 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1681 AssertRCReturn(rc, rc);
1682
1683 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1684 /* CR8 writes causes a VM-exit based on TPR threshold. */
1685 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
1686 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
1687 }
1688 else
1689 {
1690 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads causes a VM-exit. */
1691 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes causes a VM-exit. */
1692 }
1693
1694 /* Use MSR-bitmaps if supported by the CPU. */
1695 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1696 {
1697 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
1698
1699 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1700 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1701 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1702 AssertRCReturn(rc, rc);
1703
1704 /*
1705 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1706 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1707 */
1708 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1709 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1710 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1711 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1712 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1713 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1714 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1715 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1716 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1717 }
1718
1719 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1720 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1721 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1722
1723 if ((val & zap) != val)
1724 {
1725 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1726 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1727 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1728 }
1729
1730 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
1731 AssertRCReturn(rc, rc);
1732
1733 /* Update VCPU with the currently set processor-based VM-execution controls. */
1734 pVCpu->hm.s.vmx.u32ProcCtls = val;
1735
1736 /*
1737 * Secondary processor-based VM-execution controls.
1738 */
1739 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1740 {
1741 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1742 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1743
1744 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1745 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1746
1747 if (pVM->hm.s.fNestedPaging)
1748 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1749 else
1750 {
1751 /*
1752 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1753 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
1754 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1755 */
1756 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1757 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1758 }
1759
1760 if (pVM->hm.s.vmx.fVpid)
1761 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1762
1763 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1764 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1765
1766 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1767 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1768 * done dynamically. */
1769 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1770 {
1771 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1772 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1773 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1774 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1775 AssertRCReturn(rc, rc);
1776 }
1777
1778 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1779 {
1780 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1781 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1782 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1783 }
1784
1785 if ((val & zap) != val)
1786 {
1787 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1788 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1789 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1790 }
1791
1792 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
1793 AssertRCReturn(rc, rc);
1794
1795 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1796 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1797 }
1798
1799 return VINF_SUCCESS;
1800}
1801
1802
1803/**
1804 * Sets up miscellaneous (everything other than Pin & Processor-based
1805 * VM-execution) control fields in the VMCS.
1806 *
1807 * @returns VBox status code.
1808 * @param pVM Pointer to the VM.
1809 * @param pVCpu Pointer to the VMCPU.
1810 */
1811static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1812{
1813 AssertPtr(pVM);
1814 AssertPtr(pVCpu);
1815
1816 /** @todo Shouldn't we able to avoid initializing with 0? */
1817 int rc = VERR_GENERAL_FAILURE;
1818
1819 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs())*/
1820 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
1821 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
1822
1823 /*
1824 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
1825 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
1826 * We thus use the exception bitmap to control it rather than use both.
1827 */
1828 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
1829 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
1830
1831 /** @todo Explore possibility of using IO-bitmaps. */
1832 /* All IO & IOIO instructions cause VM-exits. */
1833 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
1834 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
1835
1836 /* Initialize the MSR-bitmap area. */
1837 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1838 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
1839 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1840
1841#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1842 /* Setup MSR autoloading/storing. */
1843 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1844 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1845 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1846 AssertRCReturn(rc, rc);
1847 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1848 AssertRCReturn(rc, rc);
1849
1850 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1851 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1852 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1853 AssertRCReturn(rc, rc);
1854#endif
1855
1856 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1857 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1858 AssertRCReturn(rc, rc);
1859
1860 /* Setup debug controls */
1861 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1862 AssertRCReturn(rc, rc);
1863 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1864 AssertRCReturn(rc, rc);
1865 return rc;
1866}
1867
1868
1869/**
1870 * Sets up the initial exception bitmap in the VMCS based on static conditions
1871 * (i.e. conditions that cannot ever change at runtime).
1872 *
1873 * @returns VBox status code.
1874 * @param pVM Pointer to the VM.
1875 * @param pVCpu Pointer to the VMCPU.
1876 */
1877static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1878{
1879 AssertPtr(pVM);
1880 AssertPtr(pVCpu);
1881
1882 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1883
1884 uint32_t u32XcptBitmap = 0;
1885
1886 /* Without nested paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
1887 if (!pVM->hm.s.fNestedPaging)
1888 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
1889
1890 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
1891 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1892 AssertRCReturn(rc, rc);
1893 return rc;
1894}
1895
1896
1897/**
1898 * Sets up the initial guest-state mask. The guest-state mask is consulted
1899 * before reading guest-state fields from the VMCS as VMREADs can be expensive
1900 * for the nested virtualization case (as it would cause a VM-exit).
1901 *
1902 * @param pVCpu Pointer to the VMCPU.
1903 */
1904static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
1905{
1906 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
1907 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
1908 return VINF_SUCCESS;
1909}
1910
1911
1912/**
1913 * Does per-VM VT-x initialization.
1914 *
1915 * @returns VBox status code.
1916 * @param pVM Pointer to the VM.
1917 */
1918VMMR0DECL(int) VMXR0InitVM(PVM pVM)
1919{
1920 LogFlowFunc(("pVM=%p\n", pVM));
1921
1922 int rc = hmR0VmxStructsAlloc(pVM);
1923 if (RT_FAILURE(rc))
1924 {
1925 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
1926 return rc;
1927 }
1928
1929 return VINF_SUCCESS;
1930}
1931
1932
1933/**
1934 * Does per-VM VT-x termination.
1935 *
1936 * @returns VBox status code.
1937 * @param pVM Pointer to the VM.
1938 */
1939VMMR0DECL(int) VMXR0TermVM(PVM pVM)
1940{
1941 LogFlowFunc(("pVM=%p\n", pVM));
1942
1943#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1944 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
1945 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
1946#endif
1947 hmR0VmxStructsFree(pVM);
1948 return VINF_SUCCESS;
1949}
1950
1951
1952/**
1953 * Sets up the VM for execution under VT-x.
1954 * This function is only called once per-VM during initalization.
1955 *
1956 * @returns VBox status code.
1957 * @param pVM Pointer to the VM.
1958 */
1959VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
1960{
1961 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
1962 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1963
1964 LogFlowFunc(("pVM=%p\n", pVM));
1965
1966 /*
1967 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
1968 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
1969 */
1970 /* -XXX- change hmR3InitFinalizeR0() to fail if pRealModeTSS alloc fails. */
1971 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
1972 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
1973 || !pVM->hm.s.vmx.pRealModeTSS))
1974 {
1975 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
1976 return VERR_INTERNAL_ERROR;
1977 }
1978
1979 /* Initialize these always, see hmR3InitFinalizeR0().*/
1980 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
1981 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
1982
1983 /* Setup the tagged-TLB flush handlers. */
1984 int rc = hmR0VmxSetupTaggedTlb(pVM);
1985 if (RT_FAILURE(rc))
1986 {
1987 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
1988 return rc;
1989 }
1990
1991 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1992 {
1993 PVMCPU pVCpu = &pVM->aCpus[i];
1994 AssertPtr(pVCpu);
1995 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
1996
1997 /* Set revision dword at the beginning of the VMCS structure. */
1998 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
1999
2000 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2001 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2002 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2003 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2004
2005 /* Load this VMCS as the current VMCS. */
2006 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2007 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2008 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2009
2010 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2011 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2012 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2013
2014 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2015 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2016 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2017
2018 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2019 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2020 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2021
2022 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2023 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2024 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2025
2026 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2027 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2028 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2029
2030#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2031 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2032 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2033 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2034#endif
2035
2036 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2037 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2038 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2039 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2040
2041 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2042 }
2043
2044 return VINF_SUCCESS;
2045}
2046
2047
2048/**
2049 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2050 * the VMCS.
2051 *
2052 * @returns VBox status code.
2053 * @param pVM Pointer to the VM.
2054 * @param pVCpu Pointer to the VMCPU.
2055 */
2056DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2057{
2058 RTCCUINTREG uReg = ASMGetCR0();
2059 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2060 AssertRCReturn(rc, rc);
2061
2062#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2063 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2064 if (HMVMX_IS_64BIT_HOST_MODE())
2065 {
2066 uint64_t uRegCR3 = HMR0Get64bitCR3();
2067 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2068 }
2069 else
2070#endif
2071 {
2072 uReg = ASMGetCR3();
2073 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2074 }
2075 AssertRCReturn(rc, rc);
2076
2077 uReg = ASMGetCR4();
2078 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2079 AssertRCReturn(rc, rc);
2080 return rc;
2081}
2082
2083
2084/**
2085 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2086 * the host-state area in the VMCS.
2087 *
2088 * @returns VBox status code.
2089 * @param pVM Pointer to the VM.
2090 * @param pVCpu Pointer to the VMCPU.
2091 */
2092DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2093{
2094 int rc = VERR_INTERNAL_ERROR_5;
2095 RTSEL uSelCS = 0;
2096 RTSEL uSelSS = 0;
2097 RTSEL uSelDS = 0;
2098 RTSEL uSelES = 0;
2099 RTSEL uSelFS = 0;
2100 RTSEL uSelGS = 0;
2101 RTSEL uSelTR = 0;
2102
2103 /*
2104 * Host Selector registers.
2105 */
2106#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2107 if (HMVMX_IS_64BIT_HOST_MODE())
2108 {
2109 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2110 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2111 }
2112 else
2113 {
2114 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2115 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2116 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2117 }
2118#else
2119 uSelCS = ASMGetCS();
2120 uSelSS = ASMGetSS();
2121#endif
2122
2123 /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */
2124 uSelTR = ASMGetTR();
2125
2126 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2127 /** @todo Verify if we have any platform that actually run with DS or ES with
2128 * RPL != 0 in kernel space. */
2129 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2130 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2131 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2132 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2133 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2134 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2135 Assert(uSelCS != 0);
2136 Assert(uSelTR != 0);
2137
2138 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2139#if 0
2140 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2141 Assert(uSelSS != 0);
2142#endif
2143
2144 /* Write these host selector fields into the host-state area in the VMCS. */
2145 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2146 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2147 /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static) */
2148#if 0
2149 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2150 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2151 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2152 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2153#endif
2154 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2155
2156 /*
2157 * Host GDTR and IDTR.
2158 */
2159 /** @todo Despite VT-x -not- restoring the limits on GDTR and IDTR it should
2160 * be safe to -not- save and restore GDTR and IDTR in the assembly
2161 * code and just do it here and don't care if the limits are zapped on
2162 * VM-exit. */
2163 RTGDTR Gdtr;
2164 RT_ZERO(Gdtr);
2165#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2166 if (HMVMX_IS_64BIT_HOST_MODE())
2167 {
2168 X86XDTR64 Gdtr64;
2169 X86XDTR64 Idtr64;
2170 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2171 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
2172 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
2173
2174 Gdtr.cbGdt = Gdtr64.cb;
2175 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2176 }
2177 else
2178#endif
2179 {
2180 RTIDTR Idtr;
2181 ASMGetGDTR(&Gdtr);
2182 ASMGetIDTR(&Idtr);
2183 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
2184 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
2185 }
2186
2187 /*
2188 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
2189 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
2190 */
2191 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2192 {
2193 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit.TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2194 return VERR_VMX_INVALID_HOST_STATE;
2195 }
2196
2197 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2198#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2199 if (HMVMX_IS_64BIT_HOST_MODE())
2200 {
2201 /* We need the 64-bit TR base for hybrid darwin. */
2202 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2203 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2204 }
2205 else
2206#endif
2207 {
2208 uintptr_t uTRBase;
2209#if HC_ARCH_BITS == 64
2210 uTRBase = X86DESC64_BASE(pDesc);
2211#else
2212 uTRBase = X86DESC_BASE(pDesc);
2213#endif
2214 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2215 }
2216 AssertRCReturn(rc, rc);
2217
2218 /*
2219 * Host FS base and GS base.
2220 * For 32-bit hosts the base is handled by the assembly code where we push/pop FS and GS which .
2221 * would take care of the bases. In 64-bit, the MSRs come into play.
2222 */
2223#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2224 if (HMVMX_IS_64BIT_HOST_MODE())
2225 {
2226 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2227 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2228 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
2229 AssertRCReturn(rc, rc);
2230 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
2231 AssertRCReturn(rc, rc);
2232 }
2233#endif
2234 return rc;
2235}
2236
2237
2238/**
2239 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2240 * host-state area of the VMCS. Theses MSRs will be automatically restored on
2241 * the host after every successful VM exit.
2242 *
2243 * @returns VBox status code.
2244 * @param pVM Pointer to the VM.
2245 * @param pVCpu Pointer to the VMCPU.
2246 */
2247DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2248{
2249 AssertPtr(pVCpu);
2250 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2251
2252 int rc = VINF_SUCCESS;
2253#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2254 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2255 uint32_t cHostMsrs = 0;
2256 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2257
2258 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2259 {
2260 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2261 pHostMsr->u32Reserved = 0;
2262# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2263 if (CPUMIsGuestInLongMode(pVCpu))
2264 {
2265 /* Must match the EFER value in our 64 bits switcher. */
2266 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2267 }
2268 else
2269# endif
2270 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
2271 pHostMsr++; cHostMsrs++;
2272 }
2273
2274# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2275 if (HMVMX_IS_64BIT_HOST_MODE())
2276 {
2277 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2278 pHostMsr->u32Reserved = 0;
2279 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2280 pHostMsr++; cHostMsrs++;
2281 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2282 pHostMsr->u32Reserved = 0;
2283 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2284 pHostMsr++; cHostMsrs++;
2285 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2286 pHostMsr->u32Reserved = 0;
2287 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2288 pHostMsr++; cHostMsrs++;
2289 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2290 pHostMsr->u32Reserved = 0;
2291 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2292 pHostMsr++; cHostMsrs++;
2293 }
2294# endif
2295
2296 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
2297 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2298 {
2299 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2300 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2301 }
2302
2303 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2304#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2305
2306 /*
2307 * Host Sysenter MSRs.
2308 */
2309 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2310 AssertRCReturn(rc, rc);
2311# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2312 if (HMVMX_IS_64BIT_HOST_MODE())
2313 {
2314 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2315 AssertRCReturn(rc, rc);
2316 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2317 }
2318 else
2319 {
2320 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2321 AssertRCReturn(rc, rc);
2322 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2323 }
2324# elif HC_ARCH_BITS == 32
2325 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2326 AssertRCReturn(rc, rc);
2327 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2328# else
2329 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2330 AssertRCReturn(rc, rc);
2331 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2332# endif
2333 AssertRCReturn(rc, rc);
2334
2335 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2336 * hmR0VmxSetupExitCtls() !! */
2337 return rc;
2338}
2339
2340
2341/**
2342 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2343 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2344 * controls".
2345 *
2346 * @returns VBox status code.
2347 * @param pVCpu Pointer to the VMCPU.
2348 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2349 * out-of-sync. Make sure to update the required fields
2350 * before using them.
2351 *
2352 * @remarks No-long-jump zone!!!
2353 */
2354DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2355{
2356 int rc = VINF_SUCCESS;
2357 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2358 {
2359 PVM pVM = pVCpu->CTX_SUFF(pVM);
2360 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2361 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2362
2363 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
2364 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
2365
2366 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2367 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2368 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
2369 else
2370 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
2371
2372 /*
2373 * The following should not be set (since we're not in SMM mode):
2374 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
2375 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
2376 */
2377
2378 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
2379 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
2380 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
2381
2382 if ((val & zap) != val)
2383 {
2384 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2385 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2386 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2387 }
2388
2389 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
2390 AssertRCReturn(rc, rc);
2391
2392 /* Update VCPU with the currently set VM-exit controls. */
2393 pVCpu->hm.s.vmx.u32EntryCtls = val;
2394 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2395 }
2396 return rc;
2397}
2398
2399
2400/**
2401 * Sets up the VM-exit controls in the VMCS.
2402 *
2403 * @returns VBox status code.
2404 * @param pVM Pointer to the VM.
2405 * @param pVCpu Pointer to the VMCPU.
2406 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2407 * out-of-sync. Make sure to update the required fields
2408 * before using them.
2409 *
2410 * @remarks requires EFER.
2411 */
2412DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2413{
2414 int rc = VINF_SUCCESS;
2415 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2416 {
2417 PVM pVM = pVCpu->CTX_SUFF(pVM);
2418 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2419 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2420
2421 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2422 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
2423
2424 /* Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. */
2425#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2426 if (HMVMX_IS_64BIT_HOST_MODE())
2427 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
2428 else
2429 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2430#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2431 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2432 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2433 else
2434 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2435#endif
2436
2437 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2438 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
2439
2440 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
2441 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
2442 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
2443 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
2444 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
2445
2446 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
2447 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
2448
2449 if ((val & zap) != val)
2450 {
2451 LogRel(("hmR0VmxSetupProcCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2452 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2453 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2454 }
2455
2456 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
2457 AssertRCReturn(rc, rc);
2458
2459 /* Update VCPU with the currently set VM-exit controls. */
2460 pVCpu->hm.s.vmx.u32ExitCtls = val;
2461 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2462 }
2463 return rc;
2464}
2465
2466
2467/**
2468 * Loads the guest APIC and related state.
2469 *
2470 * @returns VBox status code.
2471 * @param pVM Pointer to the VM.
2472 * @param pVCpu Pointer to the VMCPU.
2473 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2474 * out-of-sync. Make sure to update the required fields
2475 * before using them.
2476 */
2477DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2478{
2479 int rc = VINF_SUCCESS;
2480 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2481 {
2482 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2483 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2484 {
2485 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2486
2487 bool fPendingIntr = false;
2488 uint8_t u8GuestTpr = 0;
2489 rc = PDMApicGetTPR(pVCpu, &u8GuestTpr, &fPendingIntr);
2490 AssertRCReturn(rc, rc);
2491
2492 /*
2493 * If there are external interrupts pending but masked by the TPR value, apply the threshold so that if the guest
2494 * lowers the TPR, it would cause a VM-exit and we can deliver the interrupt.
2495 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2496 * the interrupt when we VM-exit for other reasons.
2497 */
2498 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2499 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2500 uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
2501 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2502
2503 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2504 AssertRCReturn(rc, rc);
2505
2506 /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
2507 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
2508 {
2509 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* EFER always up-to-date. */
2510 pMixedCtx->msrLSTAR = u8GuestTpr;
2511 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2512 {
2513 /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
2514 if (fPendingIntr)
2515 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
2516 else
2517 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2518 }
2519 }
2520 }
2521
2522 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2523 }
2524 return rc;
2525}
2526
2527
2528/**
2529 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2530 *
2531 * @returns
2532 * @param pVCpu Pointer to the VMCPU.
2533 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2534 * out-of-sync. Make sure to update the required fields
2535 * before using them.
2536 *
2537 * @remarks No-long-jump zone!!!
2538 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2539 */
2540DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2541{
2542 /*
2543 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2544 * inhibit interrupts or clear any existing interrupt-inhibition.
2545 */
2546 uint32_t uIntrState = 0;
2547 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2548 {
2549 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2550 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2551 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2552 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2553 {
2554 /*
2555 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2556 * VT-x the flag's condition to be cleared is met and thus the cleared state is correct.
2557 */
2558 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2559 }
2560 else if (pMixedCtx->eflags.Bits.u1IF)
2561 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2562 else
2563 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2564 }
2565 return uIntrState;
2566}
2567
2568
2569/**
2570 * Loads the guest's interruptibility-state into the guest-state area in the
2571 * VMCS.
2572 *
2573 * @returns VBox status code.
2574 * @param pVCpu Pointer to the VMCPU.
2575 * @param uIntrState The interruptibility-state to set.
2576 */
2577static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2578{
2579 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2580 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2581 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2582 AssertRCReturn(rc, rc);
2583 return rc;
2584}
2585
2586
2587/**
2588 * Loads the guest's RIP into the guest-state area in the VMCS.
2589 *
2590 * @returns VBox status code.
2591 * @param pVCpu Pointer to the VMCPU.
2592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2593 * out-of-sync. Make sure to update the required fields
2594 * before using them.
2595 *
2596 * @remarks No-long-jump zone!!!
2597 */
2598static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2599{
2600 int rc = VINF_SUCCESS;
2601 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2602 {
2603 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2604 AssertRCReturn(rc, rc);
2605 Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2606 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2607 }
2608 return rc;
2609}
2610
2611
2612/**
2613 * Loads the guest's RSP into the guest-state area in the VMCS.
2614 *
2615 * @returns VBox status code.
2616 * @param pVCpu Pointer to the VMCPU.
2617 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2618 * out-of-sync. Make sure to update the required fields
2619 * before using them.
2620 *
2621 * @remarks No-long-jump zone!!!
2622 */
2623static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2624{
2625 int rc = VINF_SUCCESS;
2626 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2627 {
2628 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2629 AssertRCReturn(rc, rc);
2630 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2631 }
2632 return rc;
2633}
2634
2635
2636/**
2637 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2638 *
2639 * @returns VBox status code.
2640 * @param pVCpu Pointer to the VMCPU.
2641 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2642 * out-of-sync. Make sure to update the required fields
2643 * before using them.
2644 *
2645 * @remarks No-long-jump zone!!!
2646 */
2647static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2648{
2649 int rc = VINF_SUCCESS;
2650 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2651 {
2652 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2653 Let us assert it as such and use 32-bit VMWRITE. */
2654 Assert(!(pMixedCtx->rflags.u64 >> 32));
2655 X86EFLAGS uEFlags = pMixedCtx->eflags;
2656 uEFlags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2657 uEFlags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2658
2659 /*
2660 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2661 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2662 */
2663 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2664 {
2665 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2666 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2667 pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32; /* Save the original eflags of the real-mode guest. */
2668 uEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2669 uEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2670 }
2671
2672 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, uEFlags.u32);
2673 AssertRCReturn(rc, rc);
2674
2675 Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
2676 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2677 }
2678 return rc;
2679}
2680
2681
2682/**
2683 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2684 *
2685 * @returns VBox status code.
2686 * @param pVCpu Pointer to the VMCPU.
2687 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2688 * out-of-sync. Make sure to update the required fields
2689 * before using them.
2690 *
2691 * @remarks No-long-jump zone!!!
2692 */
2693DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2694{
2695 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2696 AssertRCReturn(rc, rc);
2697 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2698 AssertRCReturn(rc, rc);
2699 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2700 AssertRCReturn(rc, rc);
2701 return rc;
2702}
2703
2704
2705/**
2706 * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
2707 * in the VMCS.
2708 *
2709 * @returns VBox status code.
2710 * @param pVM Pointer to the VM.
2711 * @param pVCpu Pointer to the VMCPU.
2712 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2713 * out-of-sync. Make sure to update the required fields
2714 * before using them.
2715 *
2716 * @remarks No-long-jump zone!!!
2717 */
2718static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
2719{
2720 int rc = VINF_SUCCESS;
2721 PVM pVM = pVCpu->CTX_SUFF(pVM);
2722
2723 /*
2724 * Guest CR0.
2725 * Guest FPU.
2726 */
2727 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2728 {
2729 Assert(!(pCtx->cr0 >> 32));
2730 uint32_t u32GuestCR0 = pCtx->cr0;
2731
2732 /* The guest's view (read access) of its CR0 is unblemished. */
2733 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2734 AssertRCReturn(rc, rc);
2735 Log(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2736
2737 /* Setup VT-x's view of the guest CR0. */
2738 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2739 if (pVM->hm.s.fNestedPaging)
2740 {
2741 if (CPUMIsGuestPagingEnabledEx(pCtx))
2742 {
2743 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
2744 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2745 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
2746 }
2747 else
2748 {
2749 /* The guest doesn't have paging enabled, make CR3 access to cause VM exits to update our shadow. */
2750 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2751 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2752 }
2753
2754 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
2755 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2756 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2757
2758 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
2759 AssertRCReturn(rc, rc);
2760 }
2761 else
2762 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a VM-exit. */
2763
2764 /*
2765 * Guest FPU bits.
2766 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
2767 * CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
2768 */
2769 u32GuestCR0 |= X86_CR0_NE;
2770 bool fInterceptNM = false;
2771 if (CPUMIsGuestFPUStateActive(pVCpu))
2772 {
2773 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
2774 /* The guest should still get #NM exceptions when it expects it to, so we should not clear TS & MP bits here.
2775 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
2776 }
2777 else
2778 {
2779 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
2780 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
2781 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
2782 }
2783
2784 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
2785 bool fInterceptMF = false;
2786 if (!(pCtx->cr0 & X86_CR0_NE))
2787 fInterceptMF = true;
2788
2789 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
2790 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2791 {
2792 Assert(PDMVmmDevHeapIsEnabled(pVM));
2793 Assert(pVM->hm.s.vmx.pRealModeTSS);
2794 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2795 fInterceptNM = true;
2796 fInterceptMF = true;
2797 }
2798 else
2799 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2800
2801 if (fInterceptNM)
2802 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
2803 else
2804 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
2805
2806 if (fInterceptMF)
2807 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
2808 else
2809 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
2810
2811 /* Additional intercepts for debugging, define these yourself explicitly. */
2812#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2813 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_BP)
2814 | RT_BIT(X86_XCPT_DB)
2815 | RT_BIT(X86_XCPT_DE)
2816 | RT_BIT(X86_XCPT_NM)
2817 | RT_BIT(X86_XCPT_UD)
2818 | RT_BIT(X86_XCPT_NP)
2819 | RT_BIT(X86_XCPT_SS)
2820 | RT_BIT(X86_XCPT_GP)
2821 | RT_BIT(X86_XCPT_PF)
2822 | RT_BIT(X86_XCPT_MF);
2823#elif defined(HMVMX_ALWAYS_TRAP_PF)
2824 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2825#endif
2826
2827 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
2828
2829 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
2830 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2831 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2832 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
2833 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
2834 else
2835 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2836
2837 u32GuestCR0 |= uSetCR0;
2838 u32GuestCR0 &= uZapCR0;
2839 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
2840
2841 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
2842 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
2843 AssertRCReturn(rc, rc);
2844 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
2845 AssertRCReturn(rc, rc);
2846 Log(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
2847
2848 /*
2849 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
2850 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
2851 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
2852 */
2853 uint32_t u32CR0Mask = 0;
2854 u32CR0Mask = X86_CR0_PE
2855 | X86_CR0_NE
2856 | X86_CR0_WP
2857 | X86_CR0_PG
2858 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
2859 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
2860 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
2861 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2862 u32CR0Mask &= ~X86_CR0_PE;
2863 if (pVM->hm.s.fNestedPaging)
2864 u32CR0Mask &= ~X86_CR0_WP;
2865
2866 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
2867 if (fInterceptNM)
2868 u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
2869 else
2870 u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
2871
2872 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
2873 pVCpu->hm.s.vmx.cr0_mask = u32CR0Mask;
2874 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
2875 AssertRCReturn(rc, rc);
2876
2877 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
2878 }
2879
2880 /*
2881 * Guest CR2.
2882 * It's always loaded in the assembler code. Nothing to do here.
2883 */
2884
2885 /*
2886 * Guest CR3.
2887 */
2888 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
2889 {
2890 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
2891 if (pVM->hm.s.fNestedPaging)
2892 {
2893 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2894
2895 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2896 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
2897 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2898 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
2899
2900 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
2901 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
2902 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
2903
2904 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2905 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2906 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
2907 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
2908
2909 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
2910 AssertRCReturn(rc, rc);
2911 Log(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
2912
2913 if ( pVM->hm.s.vmx.fUnrestrictedGuest
2914 || CPUMIsGuestPagingEnabledEx(pCtx))
2915 {
2916 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2917 if (CPUMIsGuestInPAEModeEx(pCtx))
2918 {
2919 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
2920 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
2921 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
2922 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
2923 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
2924 AssertRCReturn(rc, rc);
2925 }
2926
2927 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
2928 have Unrestricted Execution to handle the guest when it's not using paging. */
2929 GCPhysGuestCR3 = pCtx->cr3;
2930 }
2931 else
2932 {
2933 /*
2934 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
2935 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
2936 * EPT takes care of translating it to host-physical addresses.
2937 */
2938 RTGCPHYS GCPhys;
2939 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2940 Assert(PDMVmmDevHeapIsEnabled(pVM));
2941
2942 /* We obtain it here every time as the guest could have relocated this PCI region. */
2943 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2944 AssertRCReturn(rc, rc);
2945
2946 GCPhysGuestCR3 = GCPhys;
2947 }
2948
2949 Log(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
2950 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
2951 }
2952 else
2953 {
2954 /* Non-nested paging case, just use the hypervisor's CR3. */
2955 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
2956
2957 Log(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
2958 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
2959 }
2960 AssertRCReturn(rc, rc);
2961
2962 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
2963 }
2964
2965 /*
2966 * Guest CR4.
2967 */
2968 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
2969 {
2970 Assert(!(pCtx->cr4 >> 32));
2971 uint32_t u32GuestCR4 = pCtx->cr4;
2972
2973 /* The guest's view of its CR4 is unblemished. */
2974 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
2975 AssertRCReturn(rc, rc);
2976 Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
2977
2978 /* Setup VT-x's view of the guest CR4. */
2979 /*
2980 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
2981 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2982 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2983 */
2984 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2985 {
2986 Assert(pVM->hm.s.vmx.pRealModeTSS);
2987 Assert(PDMVmmDevHeapIsEnabled(pVM));
2988 u32GuestCR4 &= ~X86_CR4_VME;
2989 }
2990
2991 if (pVM->hm.s.fNestedPaging)
2992 {
2993 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2994 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2995 {
2996 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2997 u32GuestCR4 |= X86_CR4_PSE;
2998 /* Our identity mapping is a 32 bits page directory. */
2999 u32GuestCR4 &= ~X86_CR4_PAE;
3000 }
3001 /* else use guest CR4.*/
3002 }
3003 else
3004 {
3005 /*
3006 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
3007 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
3008 */
3009 switch (pVCpu->hm.s.enmShadowMode)
3010 {
3011 case PGMMODE_REAL: /* Real-mode. */
3012 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3013 case PGMMODE_32_BIT: /* 32-bit paging. */
3014 {
3015 u32GuestCR4 &= ~X86_CR4_PAE;
3016 break;
3017 }
3018
3019 case PGMMODE_PAE: /* PAE paging. */
3020 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3021 {
3022 u32GuestCR4 |= X86_CR4_PAE;
3023 break;
3024 }
3025
3026 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3027 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3028#ifdef VBOX_ENABLE_64_BITS_GUESTS
3029 break;
3030#endif
3031 default:
3032 AssertFailed();
3033 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3034 }
3035 }
3036
3037 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3038 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3039 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3040 u32GuestCR4 |= uSetCR4;
3041 u32GuestCR4 &= uZapCR4;
3042
3043 /* Write VT-x's view of the guest CR4 into the VMCS. */
3044 Log(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3045 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3046 AssertRCReturn(rc, rc);
3047
3048 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM exit. */
3049 uint32_t u32CR4Mask = 0;
3050 u32CR4Mask = X86_CR4_VME
3051 | X86_CR4_PAE
3052 | X86_CR4_PGE
3053 | X86_CR4_PSE
3054 | X86_CR4_VMXE;
3055 pVCpu->hm.s.vmx.cr4_mask = u32CR4Mask;
3056 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3057 AssertRCReturn(rc, rc);
3058
3059 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3060 }
3061 return rc;
3062}
3063
3064
3065/**
3066 * Loads the guest debug registers into the guest-state area in the VMCS.
3067 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
3068 *
3069 * @returns VBox status code.
3070 * @param pVCpu Pointer to the VMCPU.
3071 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3072 * out-of-sync. Make sure to update the required fields
3073 * before using them.
3074 *
3075 * @remarks No-long-jump zone!!!
3076 */
3077static int hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3078{
3079 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3080 return VINF_SUCCESS;
3081
3082#ifdef VBOX_STRICT
3083 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3084 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
3085 {
3086 Assert(!(pMixedCtx->dr[7] >> 32)); /* upper 32 bits are reserved (MBZ). */
3087 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3088 Assert((pMixedCtx->dr[7] & 0xd800) == 0); /* bits 15, 14, 12, 11 are reserved (MBZ). */
3089 Assert((pMixedCtx->dr[7] & 0x400) == 0x400); /* bit 10 is reserved (MB1). */
3090 }
3091#endif
3092
3093 int rc = VERR_INTERNAL_ERROR_5;
3094 PVM pVM = pVCpu->CTX_SUFF(pVM);
3095 bool fInterceptDB = false;
3096 bool fInterceptMovDRx = false;
3097 if (DBGFIsStepping(pVCpu))
3098 {
3099 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3100 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
3101 {
3102 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
3103 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3104 AssertRCReturn(rc, rc);
3105 Assert(fInterceptDB == false);
3106 }
3107 else
3108 fInterceptDB = true;
3109 }
3110
3111 if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3112 {
3113 if (!CPUMIsHyperDebugStateActive(pVCpu))
3114 {
3115 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3116 AssertRC(rc);
3117 }
3118 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3119 fInterceptMovDRx = true;
3120 }
3121 else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3122 {
3123 if (!CPUMIsGuestDebugStateActive(pVCpu))
3124 {
3125 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3126 AssertRC(rc);
3127 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3128 }
3129 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3130 Assert(fInterceptMovDRx == false);
3131 }
3132 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3133 {
3134 /* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
3135 fInterceptMovDRx = true;
3136 }
3137
3138 /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
3139 if (fInterceptDB)
3140 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3141 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3142 {
3143#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3144 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3145#endif
3146 }
3147
3148 /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
3149 if (fInterceptMovDRx)
3150 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3151 else
3152 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3153
3154 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3155 AssertRCReturn(rc, rc);
3156 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3157 AssertRCReturn(rc, rc);
3158
3159 /* The guest's view of its DR7 is unblemished. */
3160 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
3161 AssertRCReturn(rc, rc);
3162
3163 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3164 return rc;
3165}
3166
3167
3168#ifdef VBOX_STRICT
3169/**
3170 * Strict function to validate segment registers.
3171 *
3172 * @remarks Requires CR0.
3173 */
3174static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3175{
3176 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3177 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
3178 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
3179
3180 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
3181 * only updates the VMCS bits with the unusable bit and doesn't change the guest-context value. */
3182 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3183 && ( !CPUMIsGuestInRealModeEx(pCtx)
3184 && !CPUMIsGuestInV86ModeEx(pCtx)))
3185 {
3186 /* Protected mode checks */
3187 /* CS */
3188 Assert(pCtx->cs.Attr.n.u1Present);
3189 Assert(!(pCtx->cs.Attr.u & 0xf00));
3190 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3191 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3192 || !(pCtx->cs.Attr.n.u1Granularity));
3193 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3194 || (pCtx->cs.Attr.n.u1Granularity));
3195 /* CS cannot be loaded with NULL in protected mode. */
3196 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & HMVMX_SEL_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
3197 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3198 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3199 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3200 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3201 else
3202 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u2Dpl));
3203 /* SS */
3204 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3205 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3206 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
3207 if ( !(pCtx->cr0 & X86_CR0_PE)
3208 || pCtx->cs.Attr.n.u4Type == 3)
3209 {
3210 Assert(!pCtx->ss.Attr.n.u2Dpl);
3211 }
3212 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & HMVMX_SEL_UNUSABLE))
3213 {
3214 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3215 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3216 Assert(pCtx->ss.Attr.n.u1Present);
3217 Assert(!(pCtx->ss.Attr.u & 0xf00));
3218 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3219 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3220 || !(pCtx->ss.Attr.n.u1Granularity));
3221 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3222 || (pCtx->ss.Attr.n.u1Granularity));
3223 }
3224 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3225 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & HMVMX_SEL_UNUSABLE))
3226 {
3227 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3228 Assert(pCtx->ds.Attr.n.u1Present);
3229 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3230 Assert(!(pCtx->ds.Attr.u & 0xf00));
3231 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3232 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3233 || !(pCtx->ds.Attr.n.u1Granularity));
3234 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3235 || (pCtx->ds.Attr.n.u1Granularity));
3236 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3237 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3238 }
3239 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & HMVMX_SEL_UNUSABLE))
3240 {
3241 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3242 Assert(pCtx->es.Attr.n.u1Present);
3243 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3244 Assert(!(pCtx->es.Attr.u & 0xf00));
3245 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3246 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3247 || !(pCtx->es.Attr.n.u1Granularity));
3248 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3249 || (pCtx->es.Attr.n.u1Granularity));
3250 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3251 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3252 }
3253 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & HMVMX_SEL_UNUSABLE))
3254 {
3255 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3256 Assert(pCtx->fs.Attr.n.u1Present);
3257 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3258 Assert(!(pCtx->fs.Attr.u & 0xf00));
3259 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3260 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3261 || !(pCtx->fs.Attr.n.u1Granularity));
3262 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3263 || (pCtx->fs.Attr.n.u1Granularity));
3264 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3265 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3266 }
3267 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & HMVMX_SEL_UNUSABLE))
3268 {
3269 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3270 Assert(pCtx->gs.Attr.n.u1Present);
3271 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3272 Assert(!(pCtx->gs.Attr.u & 0xf00));
3273 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3274 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3275 || !(pCtx->gs.Attr.n.u1Granularity));
3276 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3277 || (pCtx->gs.Attr.n.u1Granularity));
3278 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3279 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3280 }
3281 /* 64-bit capable CPUs. */
3282# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3283 Assert(!(pCtx->cs.u64Base >> 32));
3284 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3285 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3286 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3287# endif
3288 }
3289 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3290 || ( CPUMIsGuestInRealModeEx(pCtx)
3291 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3292 {
3293 /* Real and v86 mode checks. */
3294 /* hmR0VmxWriteSegmentReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
3295 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3296 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3297 {
3298 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3299 }
3300 else
3301 {
3302 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3303 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3304 }
3305
3306 /* CS */
3307 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3308 Assert(pCtx->cs.u32Limit == 0xffff);
3309 Assert(u32CSAttr == 0xf3);
3310 /* SS */
3311 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3312 Assert(pCtx->ss.u32Limit == 0xffff);
3313 Assert(u32SSAttr == 0xf3);
3314 /* DS */
3315 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3316 Assert(pCtx->ds.u32Limit == 0xffff);
3317 Assert(u32DSAttr == 0xf3);
3318 /* ES */
3319 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3320 Assert(pCtx->es.u32Limit == 0xffff);
3321 Assert(u32ESAttr == 0xf3);
3322 /* FS */
3323 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3324 Assert(pCtx->fs.u32Limit == 0xffff);
3325 Assert(u32FSAttr == 0xf3);
3326 /* GS */
3327 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3328 Assert(pCtx->gs.u32Limit == 0xffff);
3329 Assert(u32GSAttr == 0xf3);
3330 /* 64-bit capable CPUs. */
3331# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3332 Assert(!(pCtx->cs.u64Base >> 32));
3333 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3334 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3335 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3336# endif
3337 }
3338}
3339#endif /* VBOX_STRICT */
3340
3341
3342/**
3343 * Writes a guest segment register into the guest-state area in the VMCS.
3344 *
3345 * @returns VBox status code.
3346 * @param pVCpu Pointer to the VMCPU.
3347 * @param idxSel Index of the selector in the VMCS.
3348 * @param idxLimit Index of the segment limit in the VMCS.
3349 * @param idxBase Index of the segment base in the VMCS.
3350 * @param idxAccess Index of the access rights of the segment in the VMCS.
3351 * @param pSelReg Pointer to the segment selector.
3352 * @param pCtx Pointer to the guest-CPU context.
3353 *
3354 * @remarks No-long-jump zone!!!
3355 */
3356static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3357 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3358{
3359 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3360 AssertRCReturn(rc, rc);
3361 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3362 AssertRCReturn(rc, rc);
3363 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3364 AssertRCReturn(rc, rc);
3365
3366 uint32_t u32Access = pSelReg->Attr.u;
3367 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3368 {
3369 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
3370 u32Access = 0xf3;
3371 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3372 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3373 }
3374 else
3375 {
3376 /*
3377 * The way to differentiate between whether this is really a null selector or was just a selector loaded with 0 in
3378 * real-mode is using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
3379 * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures NULL selectors
3380 * loaded in protected-mode have their attribute as 0.
3381 */
3382 if (!u32Access)
3383 u32Access = HMVMX_SEL_UNUSABLE;
3384 }
3385
3386 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3387 AssertMsg((u32Access & HMVMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3388 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
3389
3390 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3391 AssertRCReturn(rc, rc);
3392 return rc;
3393}
3394
3395
3396/**
3397 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3398 * into the guest-state area in the VMCS.
3399 *
3400 * @returns VBox status code.
3401 * @param pVM Pointer to the VM.
3402 * @param pVCPU Pointer to the VMCPU.
3403 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3404 * out-of-sync. Make sure to update the required fields
3405 * before using them.
3406 *
3407 * @remarks Requires CR0 (strict builds validation).
3408 * @remarks No-long-jump zone!!!
3409 */
3410static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3411{
3412 int rc = VERR_INTERNAL_ERROR_5;
3413 PVM pVM = pVCpu->CTX_SUFF(pVM);
3414
3415 /*
3416 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3417 */
3418 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3419 {
3420 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3421 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3422 {
3423 pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
3424 pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
3425 pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
3426 pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
3427 pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
3428 pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
3429 }
3430
3431#ifdef VBOX_WITH_REM
3432 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3433 {
3434 Assert(pVM->hm.s.vmx.pRealModeTSS);
3435 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3436 if ( pVCpu->hm.s.vmx.fWasInRealMode
3437 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3438 {
3439 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3440 in real-mode (e.g. OpenBSD 4.0) */
3441 REMFlushTBs(pVM);
3442 Log(("Load: Switch to protected mode detected!\n"));
3443 pVCpu->hm.s.vmx.fWasInRealMode = false;
3444 }
3445 }
3446#endif
3447 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3448 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3449 AssertRCReturn(rc, rc);
3450 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3451 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3452 AssertRCReturn(rc, rc);
3453 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3454 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3455 AssertRCReturn(rc, rc);
3456 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3457 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3458 AssertRCReturn(rc, rc);
3459 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3460 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3461 AssertRCReturn(rc, rc);
3462 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3463 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3464 AssertRCReturn(rc, rc);
3465
3466#ifdef VBOX_STRICT
3467 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3468#endif
3469 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3470 }
3471
3472 /*
3473 * Guest TR.
3474 */
3475 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3476 {
3477 /*
3478 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3479 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3480 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3481 */
3482 uint16_t u16Sel = 0;
3483 uint32_t u32Limit = 0;
3484 uint64_t u64Base = 0;
3485 uint32_t u32AccessRights = 0;
3486
3487 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3488 {
3489 u16Sel = pMixedCtx->tr.Sel;
3490 u32Limit = pMixedCtx->tr.u32Limit;
3491 u64Base = pMixedCtx->tr.u64Base;
3492 u32AccessRights = pMixedCtx->tr.Attr.u;
3493 }
3494 else
3495 {
3496 Assert(pVM->hm.s.vmx.pRealModeTSS);
3497 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3498
3499 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3500 RTGCPHYS GCPhys;
3501 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3502 AssertRCReturn(rc, rc);
3503
3504 X86DESCATTR DescAttr;
3505 DescAttr.u = 0;
3506 DescAttr.n.u1Present = 1;
3507 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3508
3509 u16Sel = 0;
3510 u32Limit = HM_VTX_TSS_SIZE;
3511 u64Base = GCPhys; /* in real-mode phys = virt. */
3512 u32AccessRights = DescAttr.u;
3513 }
3514
3515 /* Validate. */
3516 Assert(!(u16Sel & RT_BIT(2)));
3517 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3518 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3519 AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3520 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3521 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3522 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3523 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3524 Assert( (u32Limit & 0xfff) == 0xfff
3525 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3526 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3527 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3528
3529 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
3530 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
3531 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
3532 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
3533
3534 Log(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3535 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3536 }
3537
3538 /*
3539 * Guest GDTR.
3540 */
3541 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3542 {
3543 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
3544 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
3545
3546 Assert(!(pMixedCtx->gdtr.cbGdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3547 Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3548 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3549 }
3550
3551 /*
3552 * Guest LDTR.
3553 */
3554 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3555 {
3556 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
3557 uint32_t u32Access = 0;
3558 if (!pMixedCtx->ldtr.Attr.u)
3559 u32Access = HMVMX_SEL_UNUSABLE;
3560 else
3561 u32Access = pMixedCtx->ldtr.Attr.u;
3562
3563 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
3564 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
3565 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
3566 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
3567
3568 /* Validate. */
3569 if (!(u32Access & HMVMX_SEL_UNUSABLE))
3570 {
3571 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3572 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3573 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3574 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3575 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3576 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3577 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3578 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3579 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3580 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3581 }
3582
3583 Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3584 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3585 }
3586
3587 /*
3588 * Guest IDTR.
3589 */
3590 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3591 {
3592 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
3593 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
3594
3595 Assert(!(pMixedCtx->idtr.cbIdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3596 Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3597 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3598 }
3599
3600 return VINF_SUCCESS;
3601}
3602
3603
3604/**
3605 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3606 * areas. These MSRs will automatically be loaded to the host CPU on every
3607 * successful VM entry and stored from the host CPU on every successful VM exit.
3608 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3609 *
3610 * @returns VBox status code.
3611 * @param pVCpu Pointer to the VMCPU.
3612 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3613 * out-of-sync. Make sure to update the required fields
3614 * before using them.
3615 *
3616 * @remarks No-long-jump zone!!!
3617 */
3618static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3619{
3620 AssertPtr(pVCpu);
3621 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3622
3623 /*
3624 * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).
3625 */
3626 int rc = VINF_SUCCESS;
3627 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3628 {
3629#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3630 PVM pVM = pVCpu->CTX_SUFF(pVM);
3631 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3632 uint32_t cGuestMsrs = 0;
3633
3634 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3635 const bool fSupportsNX = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3636 const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3637 if (fSupportsNX || fSupportsLongMode)
3638 {
3639 /** @todo support save IA32_EFER, i.e.
3640 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, in which case the
3641 * guest EFER need not be part of the VM-entry MSR-load area. */
3642 pGuestMsr->u32IndexMSR = MSR_K6_EFER;
3643 pGuestMsr->u32Reserved = 0;
3644 pGuestMsr->u64Value = pMixedCtx->msrEFER;
3645 /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
3646 if (!CPUMIsGuestInLongModeEx(pMixedCtx))
3647 pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
3648 pGuestMsr++; cGuestMsrs++;
3649 if (fSupportsLongMode)
3650 {
3651 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3652 pGuestMsr->u32Reserved = 0;
3653 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */
3654 pGuestMsr++; cGuestMsrs++;
3655 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3656 pGuestMsr->u32Reserved = 0;
3657 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3658 pGuestMsr++; cGuestMsrs++;
3659 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3660 pGuestMsr->u32Reserved = 0;
3661 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3662 pGuestMsr++; cGuestMsrs++;
3663 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3664 pGuestMsr->u32Reserved = 0;
3665 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3666 pGuestMsr++; cGuestMsrs++;
3667 }
3668 }
3669
3670 /*
3671 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3672 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3673 */
3674 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3675 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3676 {
3677 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3678 pGuestMsr->u32Reserved = 0;
3679 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3680 AssertRCReturn(rc, rc);
3681 pGuestMsr++; cGuestMsrs++;
3682 }
3683
3684 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
3685 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3686 {
3687 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3688 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3689 }
3690
3691 /* Update the VCPU's copy of the guest MSR count. */
3692 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3693 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3694 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3695#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3696
3697 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3698 }
3699
3700 /*
3701 * Guest Sysenter MSRs.
3702 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
3703 * VM-exits on WRMSRs for these MSRs.
3704 */
3705 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
3706 {
3707 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
3708 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
3709 }
3710 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
3711 {
3712 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
3713 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
3714 }
3715 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
3716 {
3717 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
3718 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
3719 }
3720
3721 return rc;
3722}
3723
3724
3725/**
3726 * Loads the guest activity state into the guest-state area in the VMCS.
3727 *
3728 * @returns VBox status code.
3729 * @param pVCpu Pointer to the VMCPU.
3730 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3731 * out-of-sync. Make sure to update the required fields
3732 * before using them.
3733 *
3734 * @remarks No-long-jump zone!!!
3735 */
3736static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
3737{
3738 /** @todo See if we can make use of other states, e.g.
3739 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
3740 int rc = VINF_SUCCESS;
3741 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
3742 {
3743 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3744 AssertRCReturn(rc, rc);
3745 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
3746 }
3747 return rc;
3748}
3749
3750
3751/**
3752 * Sets up the appropriate function to run guest code.
3753 *
3754 * @returns VBox status code.
3755 * @param pVCpu Pointer to the VMCPU.
3756 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3757 * out-of-sync. Make sure to update the required fields
3758 * before using them.
3759 *
3760 * @remarks No-long-jump zone!!!
3761 */
3762static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3763{
3764 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3765 {
3766#ifndef VBOX_ENABLE_64_BITS_GUESTS
3767 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3768#endif
3769 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
3770#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3771 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
3772 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
3773#else
3774 /* 64-bit host or hybrid host. */
3775 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
3776#endif
3777 }
3778 else
3779 {
3780 /* Guest is not in long mode, use the 32-bit handler. */
3781 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
3782 }
3783 Assert(pVCpu->hm.s.vmx.pfnStartVM);
3784 return VINF_SUCCESS;
3785}
3786
3787
3788/**
3789 * Wrapper for running the guest code in VT-x.
3790 *
3791 * @returns VBox strict status code.
3792 * @param pVM Pointer to the VM.
3793 * @param pVCpu Pointer to the VMCPU.
3794 * @param pCtx Pointer to the guest-CPU context.
3795 *
3796 * @remarks No-long-jump zone!!!
3797 */
3798DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3799{
3800 /*
3801 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3802 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
3803 * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
3804 */
3805#ifdef VBOX_WITH_KERNEL_USING_XMM
3806 return HMR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
3807#else
3808 return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
3809#endif
3810}
3811
3812
3813/**
3814 * Report world-switch error and dump some useful debug info.
3815 *
3816 * @param pVM Pointer to the VM.
3817 * @param pVCpu Pointer to the VMCPU.
3818 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
3819 * @param pCtx Pointer to the guest-CPU context.
3820 * @param pVmxTransient Pointer to the VMX transient structure (only
3821 * exitReason updated).
3822 */
3823static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
3824{
3825 Assert(pVM);
3826 Assert(pVCpu);
3827 Assert(pCtx);
3828 Assert(pVmxTransient);
3829 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3830
3831 Log(("VM-entry failure: %Rrc\n", rcVMRun));
3832 switch (rcVMRun)
3833 {
3834 case VERR_VMX_INVALID_VMXON_PTR:
3835 AssertFailed();
3836 break;
3837 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
3838 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
3839 {
3840 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
3841 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
3842 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
3843 AssertRC(rc);
3844
3845#ifdef VBOX_STRICT
3846 Log(("uExitReason %#x (VmxTransient %#x)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
3847 pVmxTransient->uExitReason));
3848 Log(("Exit Qualification %#x\n", pVmxTransient->uExitQualification));
3849 Log(("InstrError %#x\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
3850 if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
3851 Log(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
3852 else
3853 Log(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
3854
3855 /* VMX control bits. */
3856 uint32_t u32Val;
3857 uint64_t u64Val;
3858 HMVMXHCUINTREG uHCReg;
3859 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
3860 Log(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
3861 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
3862 Log(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
3863 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
3864 Log(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
3865 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
3866 Log(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
3867 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
3868 Log(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
3869 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
3870 Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
3871 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
3872 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
3873 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
3874 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
3875 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
3876 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
3877 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
3878 Log(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
3879 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
3880 Log(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
3881 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3882 Log(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
3883 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3884 Log(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
3885 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
3886 Log(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
3887 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
3888 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
3889 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
3890 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
3891 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
3892 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
3893 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
3894 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
3895 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
3896 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
3897 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
3898 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
3899 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
3900 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
3901
3902 /* Guest bits. */
3903 RTGCUINTREG uGCReg;
3904 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uGCReg); AssertRC(rc);
3905 Log(("Old Guest Rip %#RGv New %#RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)uGCReg));
3906 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uGCReg); AssertRC(rc);
3907 Log(("Old Guest Rsp %#RGv New %#RGv\n", (RTGCPTR)pCtx->rsp, (RTGCPTR)uGCReg));
3908 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
3909 Log(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
3910 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
3911 Log(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
3912
3913 /* Host bits. */
3914 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
3915 Log(("Host CR0 %#RHr\n", uHCReg));
3916 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
3917 Log(("Host CR3 %#RHr\n", uHCReg));
3918 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
3919 Log(("Host CR4 %#RHr\n", uHCReg));
3920
3921 RTGDTR HostGdtr;
3922 PCX86DESCHC pDesc;
3923 ASMGetGDTR(&HostGdtr);
3924 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val);
3925 Log(("Host CS %#08x\n", u32Val));
3926 if (u32Val < HostGdtr.cbGdt)
3927 {
3928 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3929 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
3930 }
3931
3932 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
3933 Log(("Host DS %#08x\n", u32Val));
3934 if (u32Val < HostGdtr.cbGdt)
3935 {
3936 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3937 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
3938 }
3939
3940 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
3941 Log(("Host ES %#08x\n", u32Val));
3942 if (u32Val < HostGdtr.cbGdt)
3943 {
3944 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3945 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
3946 }
3947
3948 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
3949 Log(("Host FS %#08x\n", u32Val));
3950 if (u32Val < HostGdtr.cbGdt)
3951 {
3952 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3953 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
3954 }
3955
3956 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
3957 Log(("Host GS %#08x\n", u32Val));
3958 if (u32Val < HostGdtr.cbGdt)
3959 {
3960 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3961 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
3962 }
3963
3964 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
3965 Log(("Host SS %#08x\n", u32Val));
3966 if (u32Val < HostGdtr.cbGdt)
3967 {
3968 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3969 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
3970 }
3971
3972 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
3973 Log(("Host TR %#08x\n", u32Val));
3974 if (u32Val < HostGdtr.cbGdt)
3975 {
3976 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3977 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
3978 }
3979
3980 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
3981 Log(("Host TR Base %#RHv\n", uHCReg));
3982 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
3983 Log(("Host GDTR Base %#RHv\n", uHCReg));
3984 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
3985 Log(("Host IDTR Base %#RHv\n", uHCReg));
3986 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
3987 Log(("Host SYSENTER CS %#08x\n", u32Val));
3988 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
3989 Log(("Host SYSENTER EIP %#RHv\n", uHCReg));
3990 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
3991 Log(("Host SYSENTER ESP %#RHv\n", uHCReg));
3992 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
3993 Log(("Host RSP %#RHv\n", uHCReg));
3994 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
3995 Log(("Host RIP %#RHv\n", uHCReg));
3996# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3997 if (HMVMX_IS_64BIT_HOST_MODE())
3998 {
3999 Log(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4000 Log(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4001 Log(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4002 Log(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4003 Log(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4004 Log(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4005 }
4006# endif
4007#endif /* VBOX_STRICT */
4008 break;
4009 }
4010
4011 default:
4012 /* Impossible */
4013 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
4014 break;
4015 }
4016 NOREF(pVM);
4017}
4018
4019
4020#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4021#ifndef VMX_USE_CACHED_VMCS_ACCESSES
4022# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
4023#endif
4024#ifdef VBOX_STRICT
4025static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4026{
4027 switch (idxField)
4028 {
4029 case VMX_VMCS_GUEST_RIP:
4030 case VMX_VMCS_GUEST_RSP:
4031 case VMX_VMCS_GUEST_DR7:
4032 case VMX_VMCS_GUEST_SYSENTER_EIP:
4033 case VMX_VMCS_GUEST_SYSENTER_ESP:
4034 case VMX_VMCS_GUEST_GDTR_BASE:
4035 case VMX_VMCS_GUEST_IDTR_BASE:
4036 case VMX_VMCS_GUEST_CS_BASE:
4037 case VMX_VMCS_GUEST_DS_BASE:
4038 case VMX_VMCS_GUEST_ES_BASE:
4039 case VMX_VMCS_GUEST_FS_BASE:
4040 case VMX_VMCS_GUEST_GS_BASE:
4041 case VMX_VMCS_GUEST_SS_BASE:
4042 case VMX_VMCS_GUEST_LDTR_BASE:
4043 case VMX_VMCS_GUEST_TR_BASE:
4044 case VMX_VMCS_GUEST_CR3:
4045 return true;
4046 }
4047 return false;
4048}
4049
4050static bool hmR0VmxIsValidReadField(uint32_t idxField)
4051{
4052 switch (idxField)
4053 {
4054 /* Read-only fields. */
4055 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4056 return true;
4057 }
4058 /* Remaining readable fields should also be writable. */
4059 return hmR0VmxIsValidWriteField(idxField);
4060}
4061#endif /* VBOX_STRICT */
4062
4063/**
4064 * Executes the specified handler in 64-bit mode.
4065 *
4066 * @returns VBox status code.
4067 * @param pVM Pointer to the VM.
4068 * @param pVCpu Pointer to the VMCPU.
4069 * @param pCtx Pointer to the guest CPU context.
4070 * @param enmOp The operation to perform.
4071 * @param cbParam Number of parameters.
4072 * @param paParam Array of 32-bit parameters.
4073 */
4074VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4075 uint32_t *paParam)
4076{
4077 int rc, rc2;
4078 PHMGLOBLCPUINFO pCpu;
4079 RTHCPHYS HCPhysCpuPage;
4080 RTCCUINTREG uOldEFlags;
4081
4082 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4083 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4084 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4085 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4086
4087#ifdef VBOX_STRICT
4088 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4089 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4090
4091 for (uint32_t i = 0; i <pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4092 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4093#endif
4094
4095 /* Disable interrupts. */
4096 uOldEFlags = ASMIntDisableFlags();
4097
4098#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4099 RTCPUID idHostCpu = RTMpCpuId();
4100 CPUMR0SetLApic(pVM, idHostCpu);
4101#endif
4102
4103 pCpu = HMR0GetCurrentCpu();
4104 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4105
4106 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
4107 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4108
4109 /* Leave VMX Root Mode. */
4110 VMXDisable();
4111
4112 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4113
4114 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4115 CPUMSetHyperEIP(pVCpu, enmOp);
4116 for (int i = (int)cbParam - 1; i >= 0; i--)
4117 CPUMPushHyper(pVCpu, paParam[i]);
4118
4119 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4120
4121 /* Call the switcher. */
4122 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4123 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4124
4125 /** @todo replace with hmR0VmxEnterRootMode() and LeaveRootMode(). */
4126 /* Make sure the VMX instructions don't cause #UD faults. */
4127 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4128
4129 /* Re-enter VMX Root Mode */
4130 rc2 = VMXEnable(HCPhysCpuPage);
4131 if (RT_FAILURE(rc2))
4132 {
4133 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4134 ASMSetFlags(uOldEFlags);
4135 return rc2;
4136 }
4137
4138 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4139 AssertRC(rc2);
4140 Assert(!(ASMGetFlags() & X86_EFL_IF));
4141 ASMSetFlags(uOldEFlags);
4142 return rc;
4143}
4144
4145
4146/**
4147 * Prepares for and executes VMLAUNCH (64 bits guests) for 32-bit hosts
4148 * supporting 64-bit guests.
4149 *
4150 * @returns VBox status code.
4151 * @param fResume Whether to VMLAUNCH or VMRESUME.
4152 * @param pCtx Pointer to the guest-CPU context.
4153 * @param pCache Pointer to the VMCS cache.
4154 * @param pVM Pointer to the VM.
4155 * @param pVCpu Pointer to the VMCPU.
4156 */
4157DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4158{
4159 uint32_t aParam[6];
4160 PHMGLOBLCPUINFO pCpu = NULL;
4161 RTHCPHYS HCPhysCpuPage = 0;
4162 int rc = VERR_INTERNAL_ERROR_5;
4163
4164 pCpu = HMR0GetCurrentCpu();
4165 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4166
4167#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4168 pCache->uPos = 1;
4169 pCache->interPD = PGMGetInterPaeCR3(pVM);
4170 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4171#endif
4172
4173#ifdef VBOX_STRICT
4174 pCache->TestIn.HCPhysCpuPage = 0;
4175 pCache->TestIn.HCPhysVmcs = 0;
4176 pCache->TestIn.pCache = 0;
4177 pCache->TestOut.HCPhysVmcs = 0;
4178 pCache->TestOut.pCache = 0;
4179 pCache->TestOut.pCtx = 0;
4180 pCache->TestOut.eflags = 0;
4181#endif
4182
4183 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4184 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4185 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4186 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4187 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4188 aParam[5] = 0;
4189
4190#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4191 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4192 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4193#endif
4194 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4195
4196#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4197 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4198 Assert(pCtx->dr[4] == 10);
4199 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4200#endif
4201
4202#ifdef VBOX_STRICT
4203 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4204 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4205 pVCpu->hm.s.vmx.HCPhysVmcs));
4206 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4207 pCache->TestOut.HCPhysVmcs));
4208 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4209 pCache->TestOut.pCache));
4210 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4211 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4212 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4213 pCache->TestOut.pCtx));
4214 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4215#endif
4216 return rc;
4217}
4218
4219
4220/**
4221 * Initialize the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
4222 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4223 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4224 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4225 *
4226 * @returns VBox status code.
4227 * @param pVM Pointer to the VM.
4228 * @param pVCpu Pointer to the VMCPU.
4229 */
4230static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4231{
4232#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4233{ \
4234 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4235 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4236 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4237 ++cReadFields; \
4238}
4239
4240 AssertPtr(pVM);
4241 AssertPtr(pVCpu);
4242 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4243 uint32_t cReadFields = 0;
4244
4245 /* Guest-natural selector base fields */
4246#if 0
4247 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4248 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4249 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4250#endif
4251 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4252 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4253 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4254 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4255 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4256 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4257 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4258 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4259 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4260 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4261 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DR7);
4262 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4263 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4264#if 0
4265 /* Unused natural width guest-state fields. */
4266 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4267 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4268#endif
4269 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4270 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4271
4272 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4273#if 0
4274 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4275 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4276 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4277 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4278 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4279 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4280 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4281 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4282 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4283#endif
4284
4285 /* Natural width guest-state fields. */
4286 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4287#if 0
4288 /* Currently unused field. */
4289 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4290#endif
4291
4292 if (pVM->hm.s.fNestedPaging)
4293 {
4294 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4295 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4296 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4297 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4298 }
4299 else
4300 {
4301 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4302 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4303 }
4304
4305#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4306 return VINF_SUCCESS;
4307}
4308
4309
4310/**
4311 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4312 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4313 * darwin, running 64-bit guests).
4314 *
4315 * @returns VBox status code.
4316 * @param pVCpu Pointer to the VMCPU.
4317 * @param idxField The VMCS field encoding.
4318 * @param u64Val 16, 32 or 64 bits value.
4319 */
4320VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4321{
4322 int rc;
4323 switch (idxField)
4324 {
4325 /*
4326 * These fields consists of a "FULL" and a "HIGH" part which can be written to individually.
4327 */
4328 /* 64-bit Control fields. */
4329 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4330 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4331 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4332 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4333 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4334 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4335 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4336 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4337 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4338 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4339 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4340 case VMX_VMCS64_CTRL_EPTP_FULL:
4341 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4342 /* 64-bit Guest-state fields. */
4343 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4344 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4345 case VMX_VMCS64_GUEST_PAT_FULL:
4346 case VMX_VMCS64_GUEST_EFER_FULL:
4347 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4348 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4349 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4350 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4351 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4352 /* 64-bit Host-state fields. */
4353 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4354 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4355 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4356 {
4357 rc = VMXWriteVmcs32(idxField, u64Val);
4358 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4359 break;
4360 }
4361
4362 /*
4363 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
4364 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
4365 */
4366 /* Natural-width Guest-state fields. */
4367 case VMX_VMCS_GUEST_CR3:
4368 case VMX_VMCS_GUEST_ES_BASE:
4369 case VMX_VMCS_GUEST_CS_BASE:
4370 case VMX_VMCS_GUEST_SS_BASE:
4371 case VMX_VMCS_GUEST_DS_BASE:
4372 case VMX_VMCS_GUEST_FS_BASE:
4373 case VMX_VMCS_GUEST_GS_BASE:
4374 case VMX_VMCS_GUEST_LDTR_BASE:
4375 case VMX_VMCS_GUEST_TR_BASE:
4376 case VMX_VMCS_GUEST_GDTR_BASE:
4377 case VMX_VMCS_GUEST_IDTR_BASE:
4378 case VMX_VMCS_GUEST_DR7:
4379 case VMX_VMCS_GUEST_RSP:
4380 case VMX_VMCS_GUEST_RIP:
4381 case VMX_VMCS_GUEST_SYSENTER_ESP:
4382 case VMX_VMCS_GUEST_SYSENTER_EIP:
4383 {
4384 if (!(u64Val >> 32))
4385 {
4386 /* If this field is 64-bit, VT-x will zero out the top bits. */
4387 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4388 }
4389 else
4390 {
4391 /* Assert that only the 32->64 switcher case should ever come here. */
4392 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4393 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4394 }
4395 break;
4396 }
4397
4398 default:
4399 {
4400 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4401 rc = VERR_INVALID_PARAMETER;
4402 break;
4403 }
4404 }
4405 AssertRCReturn(rc, rc);
4406 return rc;
4407}
4408
4409
4410/**
4411 * Queue up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
4412 * hosts (except darwin) for 64-bit guests.
4413 *
4414 * @param pVCpu Pointer to the VMCPU.
4415 * @param idxField The VMCS field encoding.
4416 * @param u64Val 16, 32 or 64 bits value.
4417 */
4418VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4419{
4420 AssertPtr(pVCpu);
4421 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4422
4423 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4424 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4425
4426 /* Make sure there are no duplicates. */
4427 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4428 {
4429 if (pCache->Write.aField[i] == idxField)
4430 {
4431 pCache->Write.aFieldVal[i] = u64Val;
4432 return VINF_SUCCESS;
4433 }
4434 }
4435
4436 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4437 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4438 pCache->Write.cValidEntries++;
4439 return VINF_SUCCESS;
4440}
4441
4442/* Enable later when the assembly code uses these as callbacks. */
4443#if 0
4444/*
4445 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4446 *
4447 * @param pVCpu Pointer to the VMCPU.
4448 * @param pCache Pointer to the VMCS cache.
4449 *
4450 * @remarks No-long-jump zone!!!
4451 */
4452VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4453{
4454 AssertPtr(pCache);
4455 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4456 {
4457 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4458 AssertRC(rc);
4459 }
4460 pCache->Write.cValidEntries = 0;
4461}
4462
4463
4464/**
4465 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4466 *
4467 * @param pVCpu Pointer to the VMCPU.
4468 * @param pCache Pointer to the VMCS cache.
4469 *
4470 * @remarks No-long-jump zone!!!
4471 */
4472VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4473{
4474 AssertPtr(pCache);
4475 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4476 {
4477 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4478 AssertRC(rc);
4479 }
4480}
4481#endif
4482#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4483
4484
4485/**
4486 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4487 * not possible, cause VM-exits on RDTSC(P)s. Also sets up the VMX preemption
4488 * timer.
4489 *
4490 * @returns VBox status code.
4491 * @param pVCpu Pointer to the VMCPU.
4492 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4493 * out-of-sync. Make sure to update the required fields
4494 * before using them.
4495 * @remarks No-long-jump zone!!!
4496 */
4497static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4498{
4499 int rc = VERR_INTERNAL_ERROR_5;
4500 bool fOffsettedTsc = false;
4501 PVM pVM = pVCpu->CTX_SUFF(pVM);
4502 if (pVM->hm.s.vmx.fUsePreemptTimer)
4503 {
4504 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4505
4506 /* Make sure the returned values have sane upper and lower boundaries. */
4507 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4508 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4509 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4510 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4511
4512 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4513 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4514 }
4515 else
4516 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4517
4518 if (fOffsettedTsc)
4519 {
4520 uint64_t u64CurTSC = ASMReadTSC();
4521 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4522 {
4523 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4524 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4525
4526 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4527 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4528 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4529 }
4530 else
4531 {
4532 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4533 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4534 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4535 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4536 }
4537 }
4538 else
4539 {
4540 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4541 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4542 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4543 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4544 }
4545}
4546
4547
4548/**
4549 * Determines if an exception is a contributory exception. Contributory
4550 * exceptions are ones which can cause double-faults. Page-fault is
4551 * intentionally not included here as it's a conditional contributory exception.
4552 *
4553 * @returns true if the exception is contributory, false otherwise.
4554 * @param uVector The exception vector.
4555 */
4556DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4557{
4558 switch (uVector)
4559 {
4560 case X86_XCPT_GP:
4561 case X86_XCPT_SS:
4562 case X86_XCPT_NP:
4563 case X86_XCPT_TS:
4564 case X86_XCPT_DE:
4565 return true;
4566 default:
4567 break;
4568 }
4569 return false;
4570}
4571
4572
4573/**
4574 * Sets an event as a pending event to be injected into the guest.
4575 *
4576 * @param pVCpu Pointer to the VMCPU.
4577 * @param u32IntrInfo The VM-entry interruption-information field.
4578 * @param cbInstr The VM-entry instruction length in bytes (for software
4579 * interrupts, exceptions and privileged software
4580 * exceptions).
4581 * @param u32ErrCode The VM-entry exception error code.
4582 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4583 * page-fault.
4584 */
4585DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4586 RTGCUINTPTR GCPtrFaultAddress)
4587{
4588 Assert(!pVCpu->hm.s.Event.fPending);
4589 pVCpu->hm.s.Event.fPending = true;
4590 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4591 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4592 pVCpu->hm.s.Event.cbInstr = cbInstr;
4593 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4594}
4595
4596
4597/**
4598 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4599 *
4600 * @param pVCpu Pointer to the VMCPU.
4601 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4602 * out-of-sync. Make sure to update the required fields
4603 * before using them.
4604 */
4605DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4606{
4607 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
4608 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4609 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4610 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4611}
4612
4613
4614/**
4615 * Handle a condition that occurred while delivering an event through the guest
4616 * IDT.
4617 *
4618 * @returns VBox status code (informational error codes included).
4619 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4620 * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
4621 * continue execution of the guest which will delivery the #DF.
4622 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4623 *
4624 * @param pVCpu Pointer to the VMCPU.
4625 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4626 * out-of-sync. Make sure to update the required fields
4627 * before using them.
4628 * @param pVmxTransient Pointer to the VMX transient structure.
4629 *
4630 * @remarks No-long-jump zone!!!
4631 */
4632static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4633{
4634 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4635 AssertRC(rc);
4636 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4637 {
4638 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4639 AssertRCReturn(rc, rc);
4640
4641 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4642 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4643 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4644
4645 typedef enum
4646 {
4647 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4648 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4649 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4650 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4651 } VMXREFLECTXCPT;
4652
4653 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4654 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4655 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4656 {
4657 enmReflect = VMXREFLECTXCPT_XCPT;
4658#ifdef VBOX_STRICT
4659 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4660 && uExitVector == X86_XCPT_PF)
4661 {
4662 Log(("IDT: Contributory #PF uCR2=%#RGv\n", pMixedCtx->cr2));
4663 }
4664#endif
4665 if ( uExitVector == X86_XCPT_PF
4666 && uIdtVector == X86_XCPT_PF)
4667 {
4668 pVmxTransient->fVectoringPF = true;
4669 Log(("IDT: Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
4670 }
4671 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4672 && hmR0VmxIsContributoryXcpt(uExitVector)
4673 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4674 || uIdtVector == X86_XCPT_PF))
4675 {
4676 enmReflect = VMXREFLECTXCPT_DF;
4677 }
4678 else if (uIdtVector == X86_XCPT_DF)
4679 enmReflect = VMXREFLECTXCPT_TF;
4680 }
4681 else if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4682 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
4683 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
4684 {
4685 /*
4686 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exception
4687 * (whatever they are) as they reoccur when restarting the instruction.
4688 */
4689 enmReflect = VMXREFLECTXCPT_XCPT;
4690 }
4691
4692 switch (enmReflect)
4693 {
4694 case VMXREFLECTXCPT_XCPT:
4695 {
4696 uint32_t u32ErrCode = 0;
4697 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
4698 {
4699 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
4700 AssertRCReturn(rc, rc);
4701 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
4702 }
4703
4704 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
4705 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
4706 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
4707 rc = VINF_SUCCESS;
4708 Log(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo,
4709 pVCpu->hm.s.Event.u32ErrCode));
4710 break;
4711 }
4712
4713 case VMXREFLECTXCPT_DF:
4714 {
4715 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
4716 rc = VINF_VMX_DOUBLE_FAULT;
4717 Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
4718 uIdtVector, uExitVector));
4719 break;
4720 }
4721
4722 case VMXREFLECTXCPT_TF:
4723 {
4724 Log(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
4725 rc = VINF_EM_RESET;
4726 break;
4727 }
4728
4729 default:
4730 Assert(rc == VINF_SUCCESS);
4731 break;
4732 }
4733 }
4734 Assert(rc == VINF_SUCCESS || rc == VINF_VMX_DOUBLE_FAULT || rc == VINF_EM_RESET);
4735 return rc;
4736}
4737
4738
4739/**
4740 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
4741 *
4742 * @returns VBox status code.
4743 * @param pVCpu Pointer to the VMCPU.
4744 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4745 * out-of-sync. Make sure to update the required fields
4746 * before using them.
4747 *
4748 * @remarks No-long-jump zone!!!
4749 */
4750static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4751{
4752 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
4753 {
4754 uint32_t uVal = 0;
4755 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
4756 AssertRCReturn(rc, rc);
4757 uint32_t uShadow = 0;
4758 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
4759 AssertRCReturn(rc, rc);
4760
4761 uVal = (uShadow & pVCpu->hm.s.vmx.cr0_mask) | (uVal & ~pVCpu->hm.s.vmx.cr0_mask);
4762 CPUMSetGuestCR0(pVCpu, uVal);
4763 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
4764 }
4765 return VINF_SUCCESS;
4766}
4767
4768
4769/**
4770 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
4771 *
4772 * @returns VBox status code.
4773 * @param pVCpu Pointer to the VMCPU.
4774 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4775 * out-of-sync. Make sure to update the required fields
4776 * before using them.
4777 *
4778 * @remarks No-long-jump zone!!!
4779 */
4780static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4781{
4782 int rc = VINF_SUCCESS;
4783 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
4784 {
4785 uint32_t uVal = 0;
4786 uint32_t uShadow = 0;
4787 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
4788 AssertRCReturn(rc, rc);
4789 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
4790 AssertRCReturn(rc, rc);
4791
4792 uVal = (uShadow & pVCpu->hm.s.vmx.cr4_mask) | (uVal & ~pVCpu->hm.s.vmx.cr4_mask);
4793 CPUMSetGuestCR4(pVCpu, uVal);
4794 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
4795 }
4796 return rc;
4797}
4798
4799
4800/**
4801 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
4802 *
4803 * @returns VBox status code.
4804 * @param pVCpu Pointer to the VMCPU.
4805 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4806 * out-of-sync. Make sure to update the required fields
4807 * before using them.
4808 *
4809 * @remarks No-long-jump zone!!!
4810 */
4811static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4812{
4813 int rc = VINF_SUCCESS;
4814 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
4815 {
4816 RTGCUINTREG uVal = 0;
4817 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
4818 AssertRCReturn(rc, rc);
4819
4820 pMixedCtx->rip = uVal;
4821 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
4822 }
4823 return rc;
4824}
4825
4826
4827/**
4828 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
4829 *
4830 * @returns VBox status code.
4831 * @param pVCpu Pointer to the VMCPU.
4832 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4833 * out-of-sync. Make sure to update the required fields
4834 * before using them.
4835 *
4836 * @remarks No-long-jump zone!!!
4837 */
4838static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4839{
4840 int rc = VINF_SUCCESS;
4841 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
4842 {
4843 RTGCUINTREG uVal = 0;
4844 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
4845 AssertRCReturn(rc, rc);
4846
4847 pMixedCtx->rsp = uVal;
4848 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
4849 }
4850 return rc;
4851}
4852
4853
4854/**
4855 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
4856 *
4857 * @returns VBox status code.
4858 * @param pVCpu Pointer to the VMCPU.
4859 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4860 * out-of-sync. Make sure to update the required fields
4861 * before using them.
4862 *
4863 * @remarks No-long-jump zone!!!
4864 */
4865static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4866{
4867 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
4868 {
4869 uint32_t uVal = 0;
4870 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
4871 AssertRCReturn(rc, rc);
4872
4873 pMixedCtx->eflags.u32 = uVal;
4874 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
4875 {
4876 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4877 Log(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
4878
4879 pMixedCtx->eflags.Bits.u1VM = 0;
4880 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
4881 }
4882
4883 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
4884 }
4885 return VINF_SUCCESS;
4886}
4887
4888
4889/**
4890 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
4891 * guest-CPU context.
4892 */
4893DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4894{
4895 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
4896 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
4897 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
4898 return rc;
4899}
4900
4901
4902/**
4903 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
4904 * from the guest-state area in the VMCS.
4905 *
4906 * @param pVCpu Pointer to the VMCPU.
4907 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4908 * out-of-sync. Make sure to update the required fields
4909 * before using them.
4910 *
4911 * @remarks No-long-jump zone!!!
4912 */
4913static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4914{
4915 uint32_t uIntrState = 0;
4916 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
4917 AssertRC(rc);
4918
4919 if (!uIntrState)
4920 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4921 else
4922 {
4923 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
4924 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
4925 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
4926 AssertRC(rc);
4927 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
4928 AssertRC(rc);
4929
4930 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
4931 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
4932 }
4933}
4934
4935
4936/**
4937 * Saves the guest's activity state.
4938 *
4939 * @returns VBox status code.
4940 * @param pVCpu Pointer to the VMCPU.
4941 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4942 * out-of-sync. Make sure to update the required fields
4943 * before using them.
4944 *
4945 * @remarks No-long-jump zone!!!
4946 */
4947static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4948{
4949 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
4950 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
4951 return VINF_SUCCESS;
4952}
4953
4954
4955/**
4956 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
4957 * the current VMCS into the guest-CPU context.
4958 *
4959 * @returns VBox status code.
4960 * @param pVCpu Pointer to the VMCPU.
4961 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
4962 * out-of-sync. Make sure to update the required fields
4963 * before using them.
4964 *
4965 * @remarks No-long-jump zone!!!
4966 */
4967static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4968{
4969 int rc = VINF_SUCCESS;
4970 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
4971 {
4972 uint32_t u32Val = 0;
4973 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
4974 pMixedCtx->SysEnter.cs = u32Val;
4975 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
4976 }
4977
4978 RTGCUINTREG uGCVal = 0;
4979 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
4980 {
4981 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &uGCVal); AssertRCReturn(rc, rc);
4982 pMixedCtx->SysEnter.eip = uGCVal;
4983 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
4984 }
4985 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
4986 {
4987 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &uGCVal); AssertRCReturn(rc, rc);
4988 pMixedCtx->SysEnter.esp = uGCVal;
4989 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
4990 }
4991 return rc;
4992}
4993
4994
4995/**
4996 * Saves the guest FS_BASE MSRs from the current VMCS into the guest-CPU
4997 * context.
4998 *
4999 * @returns VBox status code.
5000 * @param pVCpu Pointer to the VMCPU.
5001 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5002 * out-of-sync. Make sure to update the required fields
5003 * before using them.
5004 *
5005 * @remarks No-long-jump zone!!!
5006 */
5007static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5008{
5009 int rc = VINF_SUCCESS;
5010 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
5011 {
5012 RTGCUINTREG uVal = 0;
5013 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &uVal); AssertRCReturn(rc, rc);
5014 pMixedCtx->fs.u64Base = uVal;
5015 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
5016 }
5017 return rc;
5018}
5019
5020
5021/**
5022 * Saves the guest GS_BASE MSRs from the current VMCS into the guest-CPU
5023 * context.
5024 *
5025 * @returns VBox status code.
5026 * @param pVCpu Pointer to the VMCPU.
5027 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5028 * out-of-sync. Make sure to update the required fields
5029 * before using them.
5030 *
5031 * @remarks No-long-jump zone!!!
5032 */
5033static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5034{
5035 int rc = VINF_SUCCESS;
5036 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5037 {
5038 RTGCUINTREG uVal = 0;
5039 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &uVal); AssertRCReturn(rc, rc);
5040 pMixedCtx->gs.u64Base = uVal;
5041 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5042 }
5043 return rc;
5044}
5045
5046
5047/**
5048 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5049 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
5050 * and TSC_AUX.
5051 *
5052 * @returns VBox status code.
5053 * @param pVCpu Pointer to the VMCPU.
5054 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5055 * out-of-sync. Make sure to update the required fields
5056 * before using them.
5057 *
5058 * @remarks No-long-jump zone!!!
5059 */
5060static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5061{
5062 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5063 return VINF_SUCCESS;
5064
5065#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
5066 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5067 {
5068 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5069 pMsr += i;
5070 switch (pMsr->u32IndexMSR)
5071 {
5072 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5073 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5074 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5075 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5076 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5077 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5078 default:
5079 {
5080 AssertFailed();
5081 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5082 }
5083 }
5084 }
5085#endif
5086
5087 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5088 return VINF_SUCCESS;
5089}
5090
5091
5092/**
5093 * Saves the guest control registers from the current VMCS into the guest-CPU
5094 * context.
5095 *
5096 * @returns VBox status code.
5097 * @param pVCpu Pointer to the VMCPU.
5098 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5099 * out-of-sync. Make sure to update the required fields
5100 * before using them.
5101 *
5102 * @remarks No-long-jump zone!!!
5103 */
5104static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5105{
5106 /* Guest CR0. Guest FPU. */
5107 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5108 AssertRCReturn(rc, rc);
5109
5110 /* Guest CR4. */
5111 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5112 AssertRCReturn(rc, rc);
5113
5114 /* Guest CR2 - updated always during the world-switch or in #PF. */
5115 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5116 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5117 {
5118 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
5119 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4);
5120
5121 PVM pVM = pVCpu->CTX_SUFF(pVM);
5122 if ( pVM->hm.s.vmx.fUnrestrictedGuest
5123 || ( pVM->hm.s.fNestedPaging
5124 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
5125 {
5126 RTGCUINTREG uVal = 0;
5127 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
5128 if (pMixedCtx->cr3 != uVal)
5129 {
5130 CPUMSetGuestCR3(pVCpu, uVal);
5131 if (VMMRZCallRing3IsEnabled(pVCpu))
5132 {
5133 PGMUpdateCR3(pVCpu, uVal);
5134 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5135 }
5136 else
5137 {
5138 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
5139 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5140 }
5141 }
5142
5143 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
5144 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
5145 {
5146 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
5147 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
5148 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
5149 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
5150
5151 if (VMMRZCallRing3IsEnabled(pVCpu))
5152 {
5153 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5154 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5155 }
5156 else
5157 {
5158 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5159 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5160 }
5161 }
5162 }
5163
5164 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5165 }
5166
5167 /*
5168 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
5169 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
5170 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
5171 *
5172 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
5173 */
5174 if (VMMRZCallRing3IsEnabled(pVCpu))
5175 {
5176 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5177 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
5178
5179 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5180 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5181
5182 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5183 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5184 }
5185
5186 return rc;
5187}
5188
5189
5190/**
5191 * Reads a guest segment register from the current VMCS into the guest-CPU
5192 * context.
5193 *
5194 * @returns VBox status code.
5195 * @param pVCpu Pointer to the VMCPU.
5196 * @param idxSel Index of the selector in the VMCS.
5197 * @param idxLimit Index of the segment limit in the VMCS.
5198 * @param idxBase Index of the segment base in the VMCS.
5199 * @param idxAccess Index of the access rights of the segment in the VMCS.
5200 * @param pSelReg Pointer to the segment selector.
5201 *
5202 * @remarks No-long-jump zone!!!
5203 * @remarks Never call this function directly. Use the VMXLOCAL_READ_SEG() macro
5204 * as that takes care of whether to read from the VMCS cache or not.
5205 */
5206DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5207 PCPUMSELREG pSelReg)
5208{
5209 uint32_t u32Val = 0;
5210 int rc = VMXReadVmcs32(idxSel, &u32Val);
5211 AssertRCReturn(rc, rc);
5212 pSelReg->Sel = (uint16_t)u32Val;
5213 pSelReg->ValidSel = (uint16_t)u32Val;
5214 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5215
5216 rc = VMXReadVmcs32(idxLimit, &u32Val);
5217 AssertRCReturn(rc, rc);
5218 pSelReg->u32Limit = u32Val;
5219
5220 RTGCUINTREG uGCVal = 0;
5221 rc = VMXReadVmcsGstNByIdxVal(idxBase, &uGCVal);
5222 AssertRCReturn(rc, rc);
5223 pSelReg->u64Base = uGCVal;
5224
5225 rc = VMXReadVmcs32(idxAccess, &u32Val);
5226 AssertRCReturn(rc, rc);
5227 pSelReg->Attr.u = u32Val;
5228
5229 /*
5230 * If VT-x marks the segment as unusable, the rest of the attributes are undefined with certain exceptions (some bits in
5231 * CS, SS). Regardless, we have to clear the bits here and only retain the unusable bit because the unusable bit is specific
5232 * to VT-x, everyone else relies on the attribute being zero and have no clue what the unusable bit is.
5233 *
5234 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5235 */
5236 if (pSelReg->Attr.u & HMVMX_SEL_UNUSABLE)
5237 {
5238 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
5239 pSelReg->Attr.u = HMVMX_SEL_UNUSABLE;
5240 }
5241 return VINF_SUCCESS;
5242}
5243
5244#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5245#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5246 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5247 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5248#else
5249#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5250 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5251 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5252#endif
5253
5254/**
5255 * Saves the guest segment registers from the current VMCS into the guest-CPU
5256 * context.
5257 *
5258 * @returns VBox status code.
5259 * @param pVCpu Pointer to the VMCPU.
5260 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5261 * out-of-sync. Make sure to update the required fields
5262 * before using them.
5263 *
5264 * @remarks No-long-jump zone!!!
5265 */
5266static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5267{
5268 /* Guest segment registers. */
5269 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5270 {
5271 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5272 AssertRCReturn(rc, rc);
5273 rc = VMXLOCAL_READ_SEG(CS, cs);
5274 AssertRCReturn(rc, rc);
5275 rc = VMXLOCAL_READ_SEG(SS, ss);
5276 AssertRCReturn(rc, rc);
5277 rc = VMXLOCAL_READ_SEG(DS, ds);
5278 AssertRCReturn(rc, rc);
5279 rc = VMXLOCAL_READ_SEG(ES, es);
5280 AssertRCReturn(rc, rc);
5281 rc = VMXLOCAL_READ_SEG(FS, fs);
5282 AssertRCReturn(rc, rc);
5283 rc = VMXLOCAL_READ_SEG(GS, gs);
5284 AssertRCReturn(rc, rc);
5285
5286 /* Restore segment attributes for real-on-v86 mode hack. */
5287 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5288 {
5289 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
5290 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
5291 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
5292 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
5293 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
5294 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
5295 }
5296 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5297 }
5298
5299 return VINF_SUCCESS;
5300}
5301
5302
5303/**
5304 * Saves the guest descriptor table registers and task register from the current
5305 * VMCS into the guest-CPU context.
5306 *
5307 * @returns VBox status code.
5308 * @param pVCpu Pointer to the VMCPU.
5309 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5310 * out-of-sync. Make sure to update the required fields
5311 * before using them.
5312 *
5313 * @remarks No-long-jump zone!!!
5314 */
5315static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5316{
5317 int rc = VINF_SUCCESS;
5318
5319 /* Guest LDTR. */
5320 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5321 {
5322 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5323 AssertRCReturn(rc, rc);
5324 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5325 }
5326
5327 /* Guest GDTR. */
5328 RTGCUINTREG uGCVal = 0;
5329 uint32_t u32Val = 0;
5330 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5331 {
5332 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal); AssertRCReturn(rc, rc);
5333 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5334 pMixedCtx->gdtr.pGdt = uGCVal;
5335 pMixedCtx->gdtr.cbGdt = u32Val;
5336 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5337 }
5338
5339 /* Guest IDTR. */
5340 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5341 {
5342 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal); AssertRCReturn(rc, rc);
5343 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5344 pMixedCtx->idtr.pIdt = uGCVal;
5345 pMixedCtx->idtr.cbIdt = u32Val;
5346 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5347 }
5348
5349 /* Guest TR. */
5350 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5351 {
5352 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5353 AssertRCReturn(rc, rc);
5354
5355 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
5356 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5357 {
5358 rc = VMXLOCAL_READ_SEG(TR, tr);
5359 AssertRCReturn(rc, rc);
5360 }
5361 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5362 }
5363 return rc;
5364}
5365
5366#undef VMXLOCAL_READ_SEG
5367
5368
5369/**
5370 * Saves the guest debug registers from the current VMCS into the guest-CPU
5371 * context.
5372 *
5373 * @returns VBox status code.
5374 * @param pVCpu Pointer to the VMCPU.
5375 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5376 * out-of-sync. Make sure to update the required fields
5377 * before using them.
5378 *
5379 * @remarks No-long-jump zone!!!
5380 */
5381static int hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5382{
5383 int rc = VINF_SUCCESS;
5384 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5385 {
5386 RTGCUINTREG uVal;
5387 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_DR7, &uVal); AssertRCReturn(rc, rc);
5388 pMixedCtx->dr[7] = uVal;
5389
5390 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5391 }
5392 return rc;
5393}
5394
5395
5396/**
5397 * Saves the guest APIC state from the currentl VMCS into the guest-CPU context.
5398 *
5399 * @returns VBox status code.
5400 * @param pVCpu Pointer to the VMCPU.
5401 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
5402 * out-of-sync. Make sure to update the required fields
5403 * before using them.
5404 *
5405 * @remarks No-long-jump zone!!!
5406 */
5407static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5408{
5409 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5410 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5411 return VINF_SUCCESS;
5412}
5413
5414
5415/**
5416 * Saves the entire guest state from the currently active VMCS into the
5417 * guest-CPU context. This essentially VMREADs all guest-data.
5418 *
5419 * @returns VBox status code.
5420 * @param pVCpu Pointer to the VMCPU.
5421 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5422 * out-of-sync. Make sure to update the required fields
5423 * before using them.
5424 */
5425static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5426{
5427 Assert(pVCpu);
5428 Assert(pMixedCtx);
5429
5430 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5431 return VINF_SUCCESS;
5432
5433 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled again on the ring-3 callback path,
5434 there is no real need to. */
5435 if (VMMRZCallRing3IsEnabled(pVCpu))
5436 VMMR0LogFlushDisable(pVCpu);
5437 else
5438 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5439 LogFunc(("\n"));
5440
5441 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5442 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5443
5444 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5445 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5446
5447 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5448 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5449
5450 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5451 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5452
5453 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
5454 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5455
5456 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5457 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5458
5459 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5460 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5461
5462 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5463 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5464
5465 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5466 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5467
5468 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5469 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5470
5471 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5472 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5473
5474 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5475 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5476
5477 if (VMMRZCallRing3IsEnabled(pVCpu))
5478 VMMR0LogFlushEnable(pVCpu);
5479
5480 return rc;
5481}
5482
5483
5484/**
5485 * Check per-VM and per-VCPU force flag actions that require us to go back to
5486 * ring-3 for one reason or another.
5487 *
5488 * @returns VBox status code (information status code included).
5489 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5490 * ring-3.
5491 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5492 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5493 * interrupts)
5494 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5495 * all EMTs to be in ring-3.
5496 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
5497 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
5498 * to the EM loop.
5499 *
5500 * @param pVM Pointer to the VM.
5501 * @param pVCpu Pointer to the VMCPU.
5502 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5503 * out-of-sync. Make sure to update the required fields
5504 * before using them.
5505 */
5506static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5507{
5508 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5509
5510 int rc = VERR_INTERNAL_ERROR_5;
5511 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
5512 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
5513 | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES))
5514 {
5515 /* We need the control registers now, make sure the guest-CPU context is updated. */
5516 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5517 AssertRCReturn(rc, rc);
5518
5519 /* Pending HM CR3 sync. */
5520 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5521 {
5522 rc = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5523 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
5524 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5525 }
5526
5527 /* Pending HM PAE PDPEs. */
5528 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5529 {
5530 rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5531 AssertRC(rc);
5532 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5533 }
5534
5535 /* Pending PGM C3 sync. */
5536 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5537 {
5538 rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5539 if (rc != VINF_SUCCESS)
5540 {
5541 AssertRC(rc);
5542 Log(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
5543 return rc;
5544 }
5545 }
5546
5547 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5548 /* -XXX- what was that about single stepping? */
5549 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5550 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5551 {
5552 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5553 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5554 Log(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5555 return rc;
5556 }
5557
5558 /* Pending VM request packets, such as hardware interrupts. */
5559 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5560 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5561 {
5562 Log(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5563 return VINF_EM_PENDING_REQUEST;
5564 }
5565
5566 /* Pending PGM pool flushes. */
5567 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5568 {
5569 Log(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5570 return VINF_PGM_POOL_FLUSH_PENDING;
5571 }
5572
5573 /* Pending DMA requests. */
5574 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5575 {
5576 Log(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5577 return VINF_EM_RAW_TO_R3;
5578 }
5579 }
5580
5581 /* Paranoia. */
5582 Assert(rc != VERR_EM_INTERPRETER);
5583 return VINF_SUCCESS;
5584}
5585
5586
5587/**
5588 * Converts any TRPM trap into a pending VMX event. This is typically used when
5589 * entering from ring-3 (not longjmp returns).
5590 *
5591 * @param pVCpu Pointer to the VMCPU.
5592 */
5593static void hmR0VmxTRPMTrapToPendingEvent(PVMCPU pVCpu)
5594{
5595 Assert(TRPMHasTrap(pVCpu));
5596 Assert(!pVCpu->hm.s.Event.fPending);
5597
5598 uint8_t uVector;
5599 TRPMEVENT enmTrpmEvent;
5600 RTGCUINT uErrCode;
5601 RTGCUINTPTR GCPtrFaultAddress;
5602 uint8_t cbInstr;
5603
5604 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5605 AssertRC(rc);
5606
5607 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
5608 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
5609 if (enmTrpmEvent == TRPM_TRAP)
5610 {
5611 switch (uVector)
5612 {
5613 case X86_XCPT_BP:
5614 case X86_XCPT_OF:
5615 {
5616 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5617 break;
5618 }
5619
5620 case X86_XCPT_PF:
5621 case X86_XCPT_DF:
5622 case X86_XCPT_TS:
5623 case X86_XCPT_NP:
5624 case X86_XCPT_SS:
5625 case X86_XCPT_GP:
5626 case X86_XCPT_AC:
5627 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5628 /* no break! */
5629 default:
5630 {
5631 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5632 break;
5633 }
5634 }
5635 }
5636 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5637 {
5638 if (uVector != X86_XCPT_NMI)
5639 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5640 else
5641 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5642 }
5643 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5644 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5645 else
5646 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5647
5648 rc = TRPMResetTrap(pVCpu);
5649 AssertRC(rc);
5650 Log(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5651 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5652 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
5653}
5654
5655
5656/**
5657 * Converts any pending VMX event into a TRPM trap. Typically used when leaving
5658 * VT-x to execute any instruction.
5659 *
5660 * @param pvCpu Pointer to the VMCPU.
5661 */
5662static void hmR0VmxPendingEventToTRPMTrap(PVMCPU pVCpu)
5663{
5664 Assert(pVCpu->hm.s.Event.fPending);
5665
5666 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5667 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
5668 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
5669 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
5670
5671 /* If a trap was already pending, we did something wrong! */
5672 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
5673
5674 TRPMEVENT enmTrapType;
5675 switch (uVectorType)
5676 {
5677 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5678 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5679 enmTrapType = TRPM_HARDWARE_INT;
5680 break;
5681 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5682 enmTrapType = TRPM_SOFTWARE_INT;
5683 break;
5684 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5685 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
5686 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5687 enmTrapType = TRPM_TRAP;
5688 break;
5689 default:
5690 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
5691 enmTrapType = TRPM_32BIT_HACK;
5692 break;
5693 }
5694
5695 Log(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5696 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5697 AssertRC(rc);
5698
5699 if (fErrorCodeValid)
5700 TRPMSetErrorCode(pVCpu, uErrorCode);
5701 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5702 && uVector == X86_XCPT_PF)
5703 {
5704 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
5705 }
5706 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5707 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5708 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5709 {
5710 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5711 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
5712 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
5713 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
5714 }
5715 pVCpu->hm.s.Event.fPending = false;
5716}
5717
5718
5719/**
5720 * Does the necessary state syncing before doing a longjmp to ring-3.
5721 *
5722 * @param pVM Pointer to the VM.
5723 * @param pVCpu Pointer to the VMCPU.
5724 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5725 * out-of-sync. Make sure to update the required fields
5726 * before using them.
5727 * @param rcExit The reason for exiting to ring-3. Can be
5728 * VINF_VMM_UNKNOWN_RING3_CALL.
5729 *
5730 * @remarks No-long-jmp zone!!!
5731 */
5732static void hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5733{
5734 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
5735 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5736
5737 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
5738 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
5739 AssertRC(rc);
5740
5741 /* Restore FPU state if necessary and resync on next R0 reentry .*/
5742 if (CPUMIsGuestFPUStateActive(pVCpu))
5743 {
5744 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
5745 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
5746 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
5747 }
5748
5749 /* Restore debug registers if necessary and resync on next R0 reentry. */
5750 if (CPUMIsGuestDebugStateActive(pVCpu))
5751 {
5752 CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
5753 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5754 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
5755 }
5756 else if (CPUMIsHyperDebugStateActive(pVCpu))
5757 {
5758 CPUMR0LoadHostDebugState(pVM, pVCpu);
5759 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5760 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
5761 }
5762
5763 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
5764 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
5765 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
5766 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
5767 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5768 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
5769}
5770
5771
5772/**
5773 * An action requires us to go back to ring-3. This function does the necessary
5774 * steps before we can safely return to ring-3. This is not the same as longjmps
5775 * to ring-3, this is voluntary.
5776 *
5777 * @param pVM Pointer to the VM.
5778 * @param pVCpu Pointer to the VMCPU.
5779 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5780 * out-of-sync. Make sure to update the required fields
5781 * before using them.
5782 * @param rcExit The reason for exiting to ring-3. Can be
5783 * VINF_VMM_UNKNOWN_RING3_CALL.
5784 */
5785static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5786{
5787 Assert(pVM);
5788 Assert(pVCpu);
5789 Assert(pMixedCtx);
5790 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5791
5792 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
5793 {
5794 /* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
5795 return;
5796 }
5797 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
5798 {
5799 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
5800 pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
5801 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5802 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();
5803 return;
5804 }
5805
5806 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
5807 VMMRZCallRing3Disable(pVCpu);
5808 Log(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
5809
5810 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
5811 if (pVCpu->hm.s.Event.fPending)
5812 {
5813 hmR0VmxPendingEventToTRPMTrap(pVCpu);
5814 Assert(!pVCpu->hm.s.Event.fPending);
5815 }
5816
5817 /* Sync. the guest state. */
5818 hmR0VmxLongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);
5819 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5820
5821 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
5822 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
5823 | CPUM_CHANGED_LDTR
5824 | CPUM_CHANGED_GDTR
5825 | CPUM_CHANGED_IDTR
5826 | CPUM_CHANGED_TR
5827 | CPUM_CHANGED_HIDDEN_SEL_REGS);
5828
5829 /* On our way back from ring-3 the following needs to be done. */
5830 /** @todo This can change with preemption hooks. */
5831 if (rcExit == VINF_EM_RAW_INTERRUPT)
5832 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
5833 else
5834 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
5835
5836 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
5837 VMMRZCallRing3Enable(pVCpu);
5838}
5839
5840
5841/**
5842 * VMMRZCallRing3 callback wrapper which saves the guest state before we
5843 * longjump to ring-3 and possibly get preempted.
5844 *
5845 * @param pVCpu Pointer to the VMCPU.
5846 * @param enmOperation The operation causing the ring-3 longjump.
5847 * @param pvUser The user argument (pointer to the possibly
5848 * out-of-date guest-CPU context).
5849 *
5850 * @remarks Must never be called with @a enmOperation ==
5851 * VMMCALLRING3_VM_R0_ASSERTION.
5852 */
5853DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
5854{
5855 /* VMMRZCallRing3() already makes sure we never get called as a result of an longjmp due to an assertion, */
5856 Assert(pVCpu);
5857 Assert(pvUser);
5858 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5859 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5860
5861 VMMRZCallRing3Disable(pVCpu);
5862 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5863 Log(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
5864 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
5865 VMMRZCallRing3Enable(pVCpu);
5866}
5867
5868
5869/**
5870 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
5871 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
5872 *
5873 * @returns VBox status code.
5874 * @param pVCpu Pointer to the VMCPU.
5875 */
5876DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
5877{
5878 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
5879 {
5880 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
5881 {
5882 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
5883 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
5884 AssertRC(rc);
5885 }
5886 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
5887}
5888
5889
5890/**
5891 * Injects any pending events into the guest if the guest is in a state to
5892 * receive them.
5893 *
5894 * @returns VBox status code (informational status codes included).
5895 * @param pVCpu Pointer to the VMCPU.
5896 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5897 * out-of-sync. Make sure to update the required fields
5898 * before using them.
5899 */
5900static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5901{
5902 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
5903 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
5904 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5905 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
5906
5907 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
5908 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
5909 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
5910 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5911 Assert(!TRPMHasTrap(pVCpu));
5912
5913 int rc = VINF_SUCCESS;
5914 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
5915 {
5916 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5917 bool fInject = true;
5918 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
5919 {
5920 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5921 AssertRCReturn(rc, rc);
5922 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
5923 if ( fBlockInt
5924 || fBlockSti
5925 || fBlockMovSS)
5926 {
5927 fInject = false;
5928 }
5929 }
5930 else if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
5931 && ( fBlockMovSS
5932 || fBlockSti))
5933 {
5934 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
5935 fInject = false;
5936 }
5937
5938 if (fInject)
5939 {
5940 Log(("Injecting pending event\n"));
5941 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
5942 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
5943 AssertRCReturn(rc, rc);
5944 pVCpu->hm.s.Event.fPending = false;
5945 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
5946 }
5947 else
5948 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5949 } /** @todo SMI. SMIs take priority over NMIs. */
5950 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts . */
5951 {
5952 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
5953 if ( !fBlockMovSS
5954 && !fBlockSti)
5955 {
5956 Log(("Injecting NMI\n"));
5957 uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
5958 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5959 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
5960 0 /* GCPtrFaultAddress */, &uIntrState);
5961 AssertRCReturn(rc, rc);
5962 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
5963 }
5964 else
5965 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5966 }
5967 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
5968 {
5969 /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
5970 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5971 AssertRCReturn(rc, rc);
5972 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
5973 if ( !fBlockInt
5974 && !fBlockSti
5975 && !fBlockMovSS)
5976 {
5977 uint8_t u8Interrupt;
5978 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5979 if (RT_SUCCESS(rc))
5980 {
5981 Log(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
5982 uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
5983 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5984 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
5985 0 /* GCPtrFaultAddress */, &uIntrState);
5986 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5987 }
5988 else
5989 {
5990 /** @todo Does this actually happen? If not turn it into an assertion. */
5991 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
5992 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
5993 rc = VINF_SUCCESS;
5994 }
5995 }
5996 else
5997 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5998 }
5999
6000 /*
6001 * Delivery pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
6002 * hmR0VmxInjectEventVmcs() (e.g. real-on-v86 injecting software interrupts), re-evaluate it and set the BS bit.
6003 */
6004 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6005 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6006 int rc2 = VINF_SUCCESS;
6007 if ( fBlockSti
6008 || fBlockMovSS)
6009 {
6010 if (!DBGFIsStepping(pVCpu))
6011 {
6012 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6013 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
6014 {
6015 /*
6016 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, VMX_EXIT_MTF
6017 * VMX_EXIT_APIC_WRITE, VMX_EXIT_VIRTUALIZED_EOI. See Intel spec. 27.3.4 "Saving Non-Register State".
6018 */
6019 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
6020 AssertRCReturn(rc, rc);
6021 }
6022 }
6023 else
6024 {
6025 /* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
6026 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
6027 uIntrState = 0;
6028 }
6029 }
6030
6031 /*
6032 * There's no need to clear the VM entry-interruption information field here if we're not injecting anything.
6033 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6034 */
6035 rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
6036 AssertRC(rc2);
6037
6038 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6039 return rc;
6040}
6041
6042
6043/**
6044 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
6045 *
6046 * @param pVCpu Pointer to the VMCPU.
6047 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6048 * out-of-sync. Make sure to update the required fields
6049 * before using them.
6050 */
6051DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6052{
6053 uint32_t u32IntrInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
6054 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6055}
6056
6057
6058/**
6059 * Injects a double-fault (#DF) exception into the VM.
6060 *
6061 * @returns VBox status code (informational status code included).
6062 * @param pVCpu Pointer to the VMCPU.
6063 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6064 * out-of-sync. Make sure to update the required fields
6065 * before using them.
6066 */
6067DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
6068{
6069 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6070 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6071 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6072 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
6073 puIntrState);
6074}
6075
6076
6077/**
6078 * Sets a debug (#DB) exception as pending-for-injection into the VM.
6079 *
6080 * @param pVCpu Pointer to the VMCPU.
6081 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6082 * out-of-sync. Make sure to update the required fields
6083 * before using them.
6084 */
6085DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6086{
6087 uint32_t u32IntrInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
6088 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6089 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6090}
6091
6092
6093/**
6094 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6095 *
6096 * @param pVCpu Pointer to the VMCPU.
6097 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6098 * out-of-sync. Make sure to update the required fields
6099 * before using them.
6100 * @param cbInstr The value of RIP that is to be pushed on the guest
6101 * stack.
6102 */
6103DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6104{
6105 uint32_t u32IntrInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6106 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6107 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6108}
6109
6110
6111/**
6112 * Injects a general-protection (#GP) fault into the VM.
6113 *
6114 * @returns VBox status code (informational status code included).
6115 * @param pVCpu Pointer to the VMCPU.
6116 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6117 * out-of-sync. Make sure to update the required fields
6118 * before using them.
6119 * @param u32ErrorCode The error code associated with the #GP.
6120 */
6121DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6122 uint32_t *puIntrState)
6123{
6124 uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
6125 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6126 if (fErrorCodeValid)
6127 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6128 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6129 puIntrState);
6130}
6131
6132
6133/**
6134 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6135 *
6136 * @param pVCpu Pointer to the VMCPU.
6137 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6138 * out-of-sync. Make sure to update the required fields
6139 * before using them.
6140 * @param uVector The software interrupt vector number.
6141 * @param cbInstr The value of RIP that is to be pushed on the guest
6142 * stack.
6143 */
6144DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6145{
6146 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6147 if ( uVector == X86_XCPT_BP
6148 || uVector == X86_XCPT_OF)
6149 {
6150 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6151 }
6152 else
6153 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6154 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6155}
6156
6157
6158/**
6159 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6160 * stack.
6161 *
6162 * @returns VBox status code (information status code included).
6163 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6164 * @param pVM Pointer to the VM.
6165 * @param pMixedCtx Pointer to the guest-CPU context.
6166 * @param uValue The value to push to the guest stack.
6167 */
6168DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6169{
6170 /*
6171 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6172 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6173 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6174 */
6175 if (pMixedCtx->sp == 1)
6176 return VINF_EM_RESET;
6177 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6178 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6179 AssertRCReturn(rc, rc);
6180 return rc;
6181}
6182
6183
6184/**
6185 * Injects an event into the guest upon VM-entry by updating the relevant fields
6186 * in the VM-entry area in the VMCS.
6187 *
6188 * @returns VBox status code (informational error codes included).
6189 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6190 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6191 *
6192 * @param pVCpu Pointer to the VMCPU.
6193 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6194 * be out-of-sync. Make sure to update the required
6195 * fields before using them.
6196 * @param u64IntrInfo The VM-entry interruption-information field.
6197 * @param cbInstr The VM-entry instruction length in bytes (for
6198 * software interrupts, exceptions and privileged
6199 * software exceptions).
6200 * @param u32ErrCode The VM-entry exception error code.
6201 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6202 * @param puIntrState Pointer to the current guest interruptibility-state.
6203 * This interruptibility-state will be updated if
6204 * necessary. This cannot not be NULL.
6205 *
6206 * @remarks No-long-jump zone!!!
6207 * @remarks Requires CR0!
6208 */
6209static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6210 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6211{
6212 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6213 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6214 Assert(puIntrState);
6215 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6216
6217 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6218 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6219
6220 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6221 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6222 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6223
6224 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6225
6226 /* We require CR0 to check if the guest is in real-mode. */
6227 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6228 AssertRCReturn(rc, rc);
6229
6230 /*
6231 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6232 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6233 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6234 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6235 */
6236 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6237 {
6238 PVM pVM = pVCpu->CTX_SUFF(pVM);
6239 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6240 {
6241 Assert(PDMVmmDevHeapIsEnabled(pVM));
6242 Assert(pVM->hm.s.vmx.pRealModeTSS);
6243
6244 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6245 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6246 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6247 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6248 AssertRCReturn(rc, rc);
6249 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6250
6251 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6252 const size_t cbIdtEntry = 4;
6253 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6254 {
6255 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6256 if (uVector == X86_XCPT_DF)
6257 return VINF_EM_RESET;
6258 else if (uVector == X86_XCPT_GP)
6259 {
6260 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6261 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6262 }
6263
6264 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6265 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6266 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6267 }
6268
6269 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6270 uint16_t uGuestIp = pMixedCtx->ip;
6271 if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6272 {
6273 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6274 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
6275 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6276 }
6277 else if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6278 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6279
6280 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6281 uint16_t offIdtEntry = 0;
6282 RTSEL selIdtEntry = 0;
6283 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6284 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6285 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6286 AssertRCReturn(rc, rc);
6287
6288 /* Construct the stack frame for the interrupt/exception handler. */
6289 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6290 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6291 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6292 AssertRCReturn(rc, rc);
6293
6294 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6295 if (rc == VINF_SUCCESS)
6296 {
6297 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6298 pMixedCtx->rip = offIdtEntry;
6299 pMixedCtx->cs.Sel = selIdtEntry;
6300 pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry;
6301 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6302 && uVector == X86_XCPT_PF)
6303 {
6304 pMixedCtx->cr2 = GCPtrFaultAddress;
6305 }
6306 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6307 | HM_CHANGED_GUEST_RIP
6308 | HM_CHANGED_GUEST_RFLAGS
6309 | HM_CHANGED_GUEST_RSP;
6310
6311 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
6312 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6313 {
6314 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6315 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6316 Log(("Clearing inhibition due to STI.\n"));
6317 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6318 }
6319 Log(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6320 }
6321 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6322 return rc;
6323 }
6324 else
6325 {
6326 /*
6327 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
6328 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6329 */
6330 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6331 }
6332 }
6333
6334 /* Validate. */
6335 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6336 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6337 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6338
6339 /* Inject. */
6340 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6341 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6342 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6343 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6344
6345 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6346 && uVector == X86_XCPT_PF)
6347 {
6348 pMixedCtx->cr2 = GCPtrFaultAddress;
6349 }
6350 Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
6351
6352 AssertRCReturn(rc, rc);
6353 return rc;
6354}
6355
6356
6357/**
6358 * Enters the VT-x session.
6359 *
6360 * @returns VBox status code.
6361 * @param pVM Pointer to the VM.
6362 * @param pVCpu Pointer to the VMCPU.
6363 * @param pCpu Pointer to the CPU info struct.
6364 */
6365VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
6366{
6367 AssertPtr(pVM);
6368 AssertPtr(pVCpu);
6369 Assert(pVM->hm.s.vmx.fSupported);
6370 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6371 NOREF(pCpu);
6372
6373 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6374
6375 /* Make sure we're in VMX root mode. */
6376 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6377 if (!(u32HostCR4 & X86_CR4_VMXE))
6378 {
6379 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6380 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6381 }
6382
6383 /* Load the active VMCS as the current one. */
6384 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6385 if (RT_FAILURE(rc))
6386 return rc;
6387
6388 /** @todo this will change with preemption hooks where can can VMRESUME as long
6389 * as we're no preempted. */
6390 pVCpu->hm.s.fResumeVM = false;
6391 return VINF_SUCCESS;
6392}
6393
6394
6395/**
6396 * Leaves the VT-x session.
6397 *
6398 * @returns VBox status code.
6399 * @param pVM Pointer to the VM.
6400 * @param pVCpu Pointer to the VMCPU.
6401 * @param pCtx Pointer to the guest-CPU context.
6402 */
6403VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6404{
6405 AssertPtr(pVCpu);
6406 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6407 NOREF(pVM);
6408 NOREF(pCtx);
6409
6410 /** @todo this will change with preemption hooks where we only VMCLEAR when
6411 * we are actually going to be preempted, not all the time like we
6412 * currently do. */
6413 /*
6414 * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
6415 * and mark the VMCS launch-state as "clear".
6416 */
6417 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6418 return rc;
6419}
6420
6421
6422/**
6423 * Saves the host state in the VMCS host-state.
6424 * Sets up the VM-exit MSR-load area.
6425 *
6426 * The CPU state will be loaded from these fields on every successful VM-exit.
6427 *
6428 * @returns VBox status code.
6429 * @param pVM Pointer to the VM.
6430 * @param pVCpu Pointer to the VMCPU.
6431 *
6432 * @remarks No-long-jump zone!!!
6433 */
6434VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
6435{
6436 AssertPtr(pVM);
6437 AssertPtr(pVCpu);
6438 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6439
6440 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6441
6442 /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
6443 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
6444 return VINF_SUCCESS;
6445
6446 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
6447 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6448
6449 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
6450 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6451
6452 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
6453 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6454
6455 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
6456 return rc;
6457}
6458
6459
6460/**
6461 * Loads the guest state into the VMCS guest-state area. The CPU state will be
6462 * loaded from these fields on every successful VM-entry.
6463 *
6464 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
6465 * Sets up the VM-entry controls.
6466 * Sets up the appropriate VMX non-root function to execute guest code based on
6467 * the guest CPU mode.
6468 *
6469 * @returns VBox status code.
6470 * @param pVM Pointer to the VM.
6471 * @param pVCpu Pointer to the VMCPU.
6472 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6473 * out-of-sync. Make sure to update the required fields
6474 * before using them.
6475 *
6476 * @remarks No-long-jump zone!!!
6477 */
6478VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6479{
6480 AssertPtr(pVM);
6481 AssertPtr(pVCpu);
6482 AssertPtr(pMixedCtx);
6483 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6484
6485 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6486
6487 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
6488
6489 /* Determine real-on-v86 mode. */
6490 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
6491 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
6492 && CPUMIsGuestInRealModeEx(pMixedCtx))
6493 {
6494 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
6495 }
6496
6497 /*
6498 * Load the guest-state into the VMCS.
6499 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
6500 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
6501 */
6502 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
6503 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6504
6505 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
6506 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6507
6508 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
6509 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6510
6511 rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
6512 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6513
6514 /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
6515 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
6516 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6517
6518 rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
6519 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6520
6521 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
6522 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6523
6524 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
6525 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6526
6527 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
6528 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6529
6530 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
6531 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6532
6533 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
6534 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
6535 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
6536
6537 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
6538 return rc;
6539}
6540
6541
6542/**
6543 * Does the preparations before executing guest code in VT-x.
6544 *
6545 * This may cause longjmps to ring-3 and may even result in rescheduling to the
6546 * recompiler. We must be cautious what we do here regarding committing
6547 * guest-state information into the the VMCS assuming we assuredly execute the
6548 * guest in VT-x. If we fall back to the recompiler after updating VMCS and
6549 * clearing the common-state (TRPM/forceflags), we must undo those changes so
6550 * that the recompiler can (and should) use them when it resumes guest
6551 * execution. Otherwise such operations must be done when we can no longer
6552 * exit to ring-3.
6553 *
6554 * @returns VBox status code (informational status codes included).
6555 * @retval VINF_SUCCESS if we can proceed with running the guest.
6556 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
6557 * into the guest.
6558 * @retval VINF_* scheduling changes, we have to go back to ring-3.
6559 *
6560 * @param pVM Pointer to the VM.
6561 * @param pVCpu Pointer to the VMCPU.
6562 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6563 * out-of-sync. Make sure to update the required fields
6564 * before using them.
6565 * @param pVmxTransient Pointer to the VMX transient structure.
6566 *
6567 * @remarks Called with preemption disabled.
6568 */
6569DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6570{
6571 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6572
6573#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
6574 PGMRZDynMapFlushAutoSet(pVCpu);
6575#endif
6576
6577 /* Check force flag actions that might require us to go back to ring-3. */
6578 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
6579 if (rc != VINF_SUCCESS)
6580 return rc;
6581
6582 /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
6583 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
6584 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
6585 {
6586 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
6587 RTGCPHYS GCPhysApicBase;
6588 GCPhysApicBase = pMixedCtx->msrApicBase;
6589 GCPhysApicBase &= PAGE_BASE_GC_MASK;
6590
6591 /* Unalias any existing mapping. */
6592 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
6593 AssertRCReturn(rc, rc);
6594
6595 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6596 Log(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
6597 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6598 AssertRCReturn(rc, rc);
6599
6600 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
6601 }
6602
6603#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6604 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
6605 pVmxTransient->uEFlags = ASMIntDisableFlags();
6606 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
6607 {
6608 ASMSetFlags(pVmxTransient->uEFlags);
6609 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
6610 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
6611 return VINF_EM_RAW_INTERRUPT;
6612 }
6613 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6614 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6615#endif
6616
6617 /*
6618 * Evaluates and injects any pending events, toggling force-flags and updating the guest-interruptibility
6619 * state (interrupt shadow) in the VMCS. This -can- potentially be reworked to be done before disabling
6620 * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
6621 */
6622 /** @todo Rework event evaluation and injection to be complete separate. */
6623 if (TRPMHasTrap(pVCpu))
6624 hmR0VmxTRPMTrapToPendingEvent(pVCpu);
6625
6626 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
6627 AssertRCReturn(rc, rc);
6628 return rc;
6629}
6630
6631
6632/**
6633 * Prepares to run guest code in VT-x and we've committed to doing so. This
6634 * means there is no backing out to ring-3 or anywhere else at this
6635 * point.
6636 *
6637 * @param pVM Pointer to the VM.
6638 * @param pVCpu Pointer to the VMCPU.
6639 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6640 * out-of-sync. Make sure to update the required fields
6641 * before using them.
6642 * @param pVmxTransient Pointer to the VMX transient structure.
6643 *
6644 * @remarks Called with preemption disabled.
6645 * @remarks No-long-jump zone!!!
6646 */
6647DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6648{
6649 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6650 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6651
6652#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6653 /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
6654 pVmxTransient->uEFlags = ASMIntDisableFlags();
6655 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6656#endif
6657
6658 /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
6659 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
6660 Log4(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
6661#ifdef HMVMX_SYNC_FULL_GUEST_STATE
6662 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
6663#endif
6664 int rc = VINF_SUCCESS;
6665 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
6666 {
6667 rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
6668 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
6669 }
6670 else if (pVCpu->hm.s.fContextUseFlags)
6671 {
6672 rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
6673 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
6674 }
6675 AssertRC(rc);
6676 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
6677
6678 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
6679 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
6680 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
6681
6682 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
6683 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
6684 {
6685 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
6686 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
6687 }
6688
6689 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
6690 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
6691 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
6692
6693 /*
6694 * TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not supported virtualizing
6695 * APIC accesses feature (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).
6696 */
6697 if (pVM->hm.s.fTPRPatchingActive)
6698 {
6699 Assert(!CPUMIsGuestInLongMode(pVCpu));
6700
6701 /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
6702 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6703 AssertRC(rc);
6704
6705 /* The patch code uses the LSTAR as it's not used by a guest in 32-bit mode implicitly (i.e. SYSCALL is 64-bit only). */
6706 pVmxTransient->u64LStarMsr = ASMRdMsr(MSR_K8_LSTAR);
6707 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); /* pMixedCtx->msrLSTAR contains the guest's TPR,
6708 see hmR0VmxLoadGuestApicState(). */
6709 }
6710
6711#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6712 /*
6713 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
6714 * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
6715 */
6716 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6717 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
6718 {
6719 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
6720 uint64_t u64HostTscAux = 0;
6721 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
6722 AssertRC(rc2);
6723 ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
6724 }
6725#endif
6726
6727 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
6728 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
6729 to start executing. */
6730}
6731
6732
6733/**
6734 * Performs some essential restoration of state after running guest code in
6735 * VT-x.
6736 *
6737 * @param pVM Pointer to the VM.
6738 * @param pVCpu Pointer to the VMCPU.
6739 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
6740 * out-of-sync. Make sure to update the required fields
6741 * before using them.
6742 * @param pVmxTransient Pointer to the VMX transient structure.
6743 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
6744 *
6745 * @remarks Called with interrupts disabled.
6746 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
6747 * unconditionally when it is safe to do so.
6748 */
6749DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
6750{
6751 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6752 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
6753
6754 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
6755 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
6756 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
6757 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
6758 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
6759
6760 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
6761 {
6762#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6763 /* Restore host's TSC_AUX. */
6764 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6765 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
6766#endif
6767 /** @todo Find a way to fix hardcoding a guestimate. */
6768 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
6769 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
6770 }
6771
6772 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
6773 Assert(!(ASMGetFlags() & X86_EFL_IF));
6774 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6775
6776 /* Restore the effects of TPR patching if any. */
6777 if (pVM->hm.s.fTPRPatchingActive)
6778 {
6779 int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6780 AssertRC(rc);
6781 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); /* MSR_K8_LSTAR contains the guest TPR. */
6782 ASMWrMsr(MSR_K8_LSTAR, pVmxTransient->u64LStarMsr);
6783 }
6784
6785 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
6786 pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
6787
6788 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
6789 uint32_t uExitReason;
6790 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
6791 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
6792 AssertRC(rc);
6793 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
6794 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
6795
6796 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
6797 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
6798
6799 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
6800 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
6801 {
6802 Log(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
6803 return;
6804 }
6805
6806 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
6807 {
6808 /* Update the guest interruptibility-state from the VMCS. */
6809 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
6810#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
6811 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6812 AssertRC(rc);
6813#endif
6814 /*
6815 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
6816 * we eventually get a VM-exit for any reason. This maybe expensive as PDMApicSetTPR() can longjmp to ring-3 and which is
6817 * why it's done here as it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
6818 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
6819 */
6820 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
6821 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
6822 {
6823 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
6824 AssertRC(rc);
6825 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
6826 }
6827 }
6828}
6829
6830
6831/**
6832 * Runs the guest code using VT-x.
6833 *
6834 * @returns VBox status code.
6835 * @param pVM Pointer to the VM.
6836 * @param pVCpu Pointer to the VMCPU.
6837 * @param pCtx Pointer to the guest-CPU context.
6838 *
6839 * @remarks Called with preemption disabled.
6840 */
6841VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6842{
6843 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6844 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6845
6846 VMXTRANSIENT VmxTransient;
6847 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
6848 int rc = VERR_INTERNAL_ERROR_5;
6849 uint32_t cLoops = 0;
6850
6851 for (;; cLoops++)
6852 {
6853 Assert(!HMR0SuspendPending());
6854 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
6855 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
6856 (unsigned)RTMpCpuId(), cLoops));
6857
6858 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
6859 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
6860 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
6861 if (rc != VINF_SUCCESS)
6862 break;
6863
6864 /*
6865 * No longjmps to ring-3 from this point on!!!
6866 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
6867 * This also disables flushing of the R0-logger instance (if any).
6868 */
6869 VMMRZCallRing3Disable(pVCpu);
6870 VMMRZCallRing3RemoveNotification(pVCpu);
6871 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
6872
6873 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
6874 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
6875
6876 /*
6877 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
6878 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
6879 */
6880 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
6881 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
6882 {
6883 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
6884 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
6885 return rc;
6886 }
6887
6888 /* Handle the VM-exit. */
6889 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
6890 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
6891 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
6892 HMVMX_START_EXIT_DISPATCH_PROF();
6893#ifdef HMVMX_USE_FUNCTION_TABLE
6894 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
6895#else
6896 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
6897#endif
6898 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
6899 if (rc != VINF_SUCCESS)
6900 break;
6901 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
6902 {
6903 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
6904 rc = VINF_EM_RAW_INTERRUPT;
6905 break;
6906 }
6907 }
6908
6909 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
6910 if (rc == VERR_EM_INTERPRETER)
6911 rc = VINF_EM_RAW_EMULATE_INSTR;
6912 else if (rc == VINF_EM_RESET)
6913 rc = VINF_EM_TRIPLE_FAULT;
6914 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
6915 return rc;
6916}
6917
6918
6919#ifndef HMVMX_USE_FUNCTION_TABLE
6920DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
6921{
6922 int rc;
6923 switch (rcReason)
6924 {
6925 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
6926 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
6927 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
6928 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
6929 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
6930 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
6931 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6932 case VMX_EXIT_XCPT_NMI: rc = hmR0VmxExitXcptNmi(pVCpu, pMixedCtx, pVmxTransient); break;
6933 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
6934 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
6935 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
6936 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
6937 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
6938 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
6939 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
6940 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
6941 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
6942 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
6943 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
6944 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
6945 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
6946 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
6947 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
6948 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
6949 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
6950 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6951 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6952 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
6953 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
6954 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
6955 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
6956 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
6957 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
6958
6959 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
6960 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
6961 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
6962 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
6963 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
6964 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
6965 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
6966 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
6967 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
6968
6969 case VMX_EXIT_VMCALL:
6970 case VMX_EXIT_VMCLEAR:
6971 case VMX_EXIT_VMLAUNCH:
6972 case VMX_EXIT_VMPTRLD:
6973 case VMX_EXIT_VMPTRST:
6974 case VMX_EXIT_VMREAD:
6975 case VMX_EXIT_VMRESUME:
6976 case VMX_EXIT_VMWRITE:
6977 case VMX_EXIT_VMXOFF:
6978 case VMX_EXIT_VMXON:
6979 case VMX_EXIT_INVEPT:
6980 case VMX_EXIT_INVVPID:
6981 case VMX_EXIT_VMFUNC:
6982 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
6983 break;
6984 default:
6985 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
6986 break;
6987 }
6988 return rc;
6989}
6990#endif
6991
6992#ifdef DEBUG
6993/* Is there some generic IPRT define for this that are not in Runtime/internal/\* ?? */
6994# define VMX_ASSERT_PREEMPT_CPUID_VAR() \
6995 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6996# define VMX_ASSERT_PREEMPT_CPUID() \
6997 do \
6998 { \
6999 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
7000 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
7001 } while (0)
7002
7003# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
7004 do { \
7005 AssertPtr(pVCpu); \
7006 AssertPtr(pMixedCtx); \
7007 AssertPtr(pVmxTransient); \
7008 Assert(pVmxTransient->fVMEntryFailed == false); \
7009 Assert(ASMIntAreEnabled()); \
7010 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7011 VMX_ASSERT_PREEMPT_CPUID_VAR(); \
7012 LogFunc(("vcpu[%u] vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n", \
7013 (unsigned)pVCpu->idCpu)); \
7014 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
7015 if (VMMR0IsLogFlushDisabled(pVCpu)) \
7016 VMX_ASSERT_PREEMPT_CPUID(); \
7017 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
7018 } while (0)
7019# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
7020 do { \
7021 LogFunc(("\n")); \
7022 } while(0)
7023#else /* Release builds */
7024# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
7025# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
7026#endif
7027
7028
7029/**
7030 * Advances the guest RIP after reading it from the VMCS.
7031 *
7032 * @returns VBox status code.
7033 * @param pVCpu Pointer to the VMCPU.
7034 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
7035 * out-of-sync. Make sure to update the required fields
7036 * before using them.
7037 * @param pVmxTransient Pointer to the VMX transient structure.
7038 *
7039 * @remarks No-long-jump zone!!!
7040 */
7041DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7042{
7043 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7044 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7045 AssertRCReturn(rc, rc);
7046
7047 pMixedCtx->rip += pVmxTransient->cbInstr;
7048 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7049 return rc;
7050}
7051
7052
7053/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7054/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
7055/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
7056/**
7057 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
7058 */
7059HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7060{
7061 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7062 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
7063#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
7064 Assert(ASMIntAreEnabled());
7065 return VINF_SUCCESS;
7066#else
7067 return VINF_EM_RAW_INTERRUPT;
7068#endif
7069}
7070
7071
7072/**
7073 * VM-exit handler for exceptions and NMIs (VMX_EXIT_XCPT_NMI).
7074 */
7075HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7076{
7077 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7078 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
7079
7080 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
7081 AssertRCReturn(rc, rc);
7082
7083 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
7084 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
7085 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7086
7087 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7088 {
7089 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7090 return VINF_EM_RAW_INTERRUPT;
7091 }
7092
7093 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
7094 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
7095 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
7096 {
7097 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7098 return VINF_SUCCESS;
7099 }
7100 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
7101 {
7102 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7103 return rc;
7104 }
7105
7106 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
7107 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
7108 switch (uIntrType)
7109 {
7110 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
7111 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7112 /* no break */
7113 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
7114 {
7115 switch (uVector)
7116 {
7117 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
7118 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
7119 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
7120 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
7121 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
7122 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
7123#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7124 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
7125 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7126 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
7127 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7128 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7129 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7130 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
7131 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7132 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
7133 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7134#endif
7135 default:
7136 {
7137 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7138 AssertRCReturn(rc, rc);
7139
7140 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
7141 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7142 {
7143 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
7144 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
7145 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7146 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
7147 AssertRCReturn(rc, rc);
7148 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
7149 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
7150 0 /* GCPtrFaultAddress */);
7151 AssertRCReturn(rc, rc);
7152 }
7153 else
7154 {
7155 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
7156 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
7157 }
7158 break;
7159 }
7160 }
7161 break;
7162 }
7163
7164 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
7165 default:
7166 {
7167 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
7168 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
7169 break;
7170 }
7171 }
7172 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7173 return rc;
7174}
7175
7176
7177/**
7178 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7179 */
7180HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7181{
7182 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7183
7184 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
7185 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7186 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7187 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7188 AssertRCReturn(rc, rc);
7189
7190 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
7191 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
7192 return VINF_SUCCESS;
7193}
7194
7195
7196/**
7197 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7198 */
7199HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7200{
7201 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7202 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7203 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7204}
7205
7206
7207/**
7208 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7209 */
7210HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7211{
7212 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7213 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
7214 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7215}
7216
7217
7218/**
7219 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7220 */
7221HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7222{
7223 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7224 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
7225 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7226}
7227
7228
7229/**
7230 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7231 */
7232HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7233{
7234 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7235 PVM pVM = pVCpu->CTX_SUFF(pVM);
7236 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7237 if (RT_LIKELY(rc == VINF_SUCCESS))
7238 {
7239 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7240 Assert(pVmxTransient->cbInstr == 2);
7241 }
7242 else
7243 {
7244 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
7245 rc = VERR_EM_INTERPRETER;
7246 }
7247 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
7248 return rc;
7249}
7250
7251
7252/**
7253 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7254 */
7255HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7256{
7257 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7258 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
7259 AssertRCReturn(rc, rc);
7260
7261 if (pMixedCtx->cr4 & X86_CR4_SMXE)
7262 return VINF_EM_RAW_EMULATE_INSTR;
7263
7264 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
7265 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7266}
7267
7268
7269/**
7270 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7271 */
7272HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7273{
7274 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7275 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7276 AssertRCReturn(rc, rc);
7277
7278 PVM pVM = pVCpu->CTX_SUFF(pVM);
7279 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7280 if (RT_LIKELY(rc == VINF_SUCCESS))
7281 {
7282 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7283 Assert(pVmxTransient->cbInstr == 2);
7284 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7285 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
7286 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7287 }
7288 else
7289 {
7290 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
7291 rc = VERR_EM_INTERPRETER;
7292 }
7293 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7294 return rc;
7295}
7296
7297
7298/**
7299 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7300 */
7301HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7302{
7303 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7304 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7305 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
7306 AssertRCReturn(rc, rc);
7307
7308 PVM pVM = pVCpu->CTX_SUFF(pVM);
7309 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
7310 if (RT_LIKELY(rc == VINF_SUCCESS))
7311 {
7312 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7313 Assert(pVmxTransient->cbInstr == 3);
7314 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7315 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
7316 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7317 }
7318 else
7319 {
7320 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
7321 rc = VERR_EM_INTERPRETER;
7322 }
7323 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7324 return rc;
7325}
7326
7327
7328/**
7329 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7330 */
7331HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7332{
7333 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7334 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7335 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
7336 AssertRCReturn(rc, rc);
7337
7338 PVM pVM = pVCpu->CTX_SUFF(pVM);
7339 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7340 if (RT_LIKELY(rc == VINF_SUCCESS))
7341 {
7342 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7343 Assert(pVmxTransient->cbInstr == 2);
7344 }
7345 else
7346 {
7347 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7348 rc = VERR_EM_INTERPRETER;
7349 }
7350 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
7351 return rc;
7352}
7353
7354
7355/**
7356 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7357 */
7358HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7359{
7360 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7361 PVM pVM = pVCpu->CTX_SUFF(pVM);
7362 Assert(!pVM->hm.s.fNestedPaging);
7363
7364 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7365 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7366 AssertRCReturn(rc, rc);
7367
7368 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
7369 rc = VBOXSTRICTRC_VAL(rc2);
7370 if (RT_LIKELY(rc == VINF_SUCCESS))
7371 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7372 else
7373 {
7374 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RGv failed with %Rrc\n",
7375 pVmxTransient->uExitQualification, rc));
7376 }
7377 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
7378 return rc;
7379}
7380
7381
7382/**
7383 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7384 */
7385HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7386{
7387 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7388 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7389 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7390 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7391 AssertRCReturn(rc, rc);
7392
7393 PVM pVM = pVCpu->CTX_SUFF(pVM);
7394 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7395 if (RT_LIKELY(rc == VINF_SUCCESS))
7396 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7397 else
7398 {
7399 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
7400 rc = VERR_EM_INTERPRETER;
7401 }
7402 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
7403 return rc;
7404}
7405
7406
7407/**
7408 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7409 */
7410HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7411{
7412 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7413 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7414 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7415 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7416 AssertRCReturn(rc, rc);
7417
7418 PVM pVM = pVCpu->CTX_SUFF(pVM);
7419 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7420 rc = VBOXSTRICTRC_VAL(rc2);
7421 if (RT_LIKELY( rc == VINF_SUCCESS
7422 || rc == VINF_EM_HALT))
7423 {
7424 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7425 AssertRCReturn(rc3, rc3);
7426
7427 if ( rc == VINF_EM_HALT
7428 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
7429 {
7430 rc = VINF_SUCCESS;
7431 }
7432 }
7433 else
7434 {
7435 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
7436 rc = VERR_EM_INTERPRETER;
7437 }
7438 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
7439 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
7440 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
7441 return rc;
7442}
7443
7444
7445/**
7446 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
7447 */
7448HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7449{
7450 /*
7451 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
7452 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
7453 * executing VMCALL in VMX root operation. If we get here something funny is going on.
7454 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
7455 */
7456 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7457 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7458}
7459
7460
7461/**
7462 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
7463 */
7464HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7465{
7466 /*
7467 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
7468 * root operation. If we get there there is something funny going on.
7469 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
7470 */
7471 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7472 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7473}
7474
7475
7476/**
7477 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
7478 */
7479HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7480{
7481 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
7482 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7483 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7484}
7485
7486
7487/**
7488 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
7489 */
7490HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7491{
7492 /*
7493 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
7494 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
7495 * See Intel spec. 25.3 "Other Causes of VM-exits".
7496 */
7497 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7498 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7499}
7500
7501
7502/**
7503 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
7504 * VM-exit.
7505 */
7506HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7507{
7508 /*
7509 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. "33.14.1 Default Treatment of
7510 * SMI Delivery" and "29.3 VMX Instructions" for "VMXON". It is -NOT- blocked in VMX non-root operation so we can potentially
7511 * still get these exits. See Intel spec. "23.8 Restrictions on VMX operation".
7512 */
7513 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7514 return VINF_SUCCESS; /** @todo r=ramshankar: correct?. */
7515}
7516
7517
7518/**
7519 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7520 * VM-exit.
7521 */
7522HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7523{
7524 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7525 return VINF_EM_RESET;
7526}
7527
7528
7529/**
7530 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7531 */
7532HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7533{
7534 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7535 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
7536 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7537 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7538 AssertRCReturn(rc, rc);
7539
7540 pMixedCtx->rip++;
7541 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7542 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
7543 rc = VINF_SUCCESS;
7544 else
7545 rc = VINF_EM_HALT;
7546
7547 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
7548 return rc;
7549}
7550
7551
7552/**
7553 * VM-exit handler for instructions that result in a #UD exception delivered to the guest.
7554 */
7555HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7556{
7557 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7558 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
7559 return VINF_SUCCESS;
7560}
7561
7562
7563/**
7564 * VM-exit handler for expiry of the VMX preemption timer.
7565 */
7566HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7567{
7568 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7569
7570 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
7571 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7572
7573 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7574 PVM pVM = pVCpu->CTX_SUFF(pVM);
7575 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7576 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
7577 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7578}
7579
7580
7581/**
7582 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7583 */
7584HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7585{
7586 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7587 /* We expose XSETBV to the guest, fallback to the recompiler for emulation. */
7588 /** @todo check if XSETBV is supported by the recompiler. */
7589 return VERR_EM_INTERPRETER;
7590}
7591
7592
7593/**
7594 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7595 */
7596HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7597{
7598 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7599 /* The guest should not invalidate the host CPU's TLBs, fallback to recompiler. */
7600 /** @todo implement EMInterpretInvpcid() */
7601 return VERR_EM_INTERPRETER;
7602}
7603
7604
7605/**
7606 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
7607 * Error VM-exit.
7608 */
7609HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7610{
7611 uint32_t uIntrState;
7612 HMVMXHCUINTREG uHCReg;
7613 uint64_t u64Val;
7614 uint32_t u32Val;
7615
7616 int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7617 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
7618 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7619 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
7620 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7621 AssertRCReturn(rc, rc);
7622
7623 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
7624 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7625 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7626 Log(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
7627
7628 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
7629 Log(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
7630 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
7631 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
7632 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
7633 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7634 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
7635 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
7636 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
7637 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7638 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7639 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7640
7641 PVM pVM = pVCpu->CTX_SUFF(pVM);
7642 HMDumpRegs(pVM, pVCpu, pMixedCtx);
7643
7644 return VERR_VMX_INVALID_GUEST_STATE;
7645}
7646
7647
7648/**
7649 * VM-exit handler for VM-entry failure due to an MSR-load
7650 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
7651 */
7652HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7653{
7654 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7655 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7656}
7657
7658
7659/**
7660 * VM-exit handler for VM-entry failure due to a machine-check event
7661 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
7662 */
7663HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7664{
7665 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7666 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7667}
7668
7669
7670/**
7671 * VM-exit handler for all undefined reasons. Should never ever happen.. in
7672 * theory.
7673 */
7674HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7675{
7676 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
7677 return VERR_VMX_UNDEFINED_EXIT_CODE;
7678}
7679
7680
7681/**
7682 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
7683 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
7684 * Conditional VM-exit.
7685 */
7686HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7687{
7688 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7689 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
7690 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
7691 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
7692 return VERR_EM_INTERPRETER;
7693 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7694 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7695}
7696
7697
7698/**
7699 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
7700 */
7701HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7702{
7703 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7704 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
7705 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
7706 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
7707 return VERR_EM_INTERPRETER;
7708 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7709 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7710}
7711
7712
7713/**
7714 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7715 */
7716HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7717{
7718 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7719 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
7720 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7721 AssertRCReturn(rc, rc);
7722 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7723 AssertRCReturn(rc, rc);
7724 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7725 AssertRCReturn(rc, rc);
7726
7727 PVM pVM = pVCpu->CTX_SUFF(pVM);
7728 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7729 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
7730 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
7731 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7732
7733 if (RT_LIKELY(rc == VINF_SUCCESS))
7734 {
7735 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7736 Assert(pVmxTransient->cbInstr == 2);
7737 }
7738 return rc;
7739}
7740
7741
7742/**
7743 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7744 */
7745HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7746{
7747 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7748 PVM pVM = pVCpu->CTX_SUFF(pVM);
7749 int rc = VINF_SUCCESS;
7750
7751 /* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */
7752 if ( pVM->hm.s.fTPRPatchingActive
7753 && pMixedCtx->ecx == MSR_K8_LSTAR)
7754 {
7755 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* Requires EFER but it's always up-to-date. */
7756 if ((pMixedCtx->eax & 0xff) != pVmxTransient->u8GuestTpr)
7757 {
7758 rc = PDMApicSetTPR(pVCpu, pMixedCtx->eax & 0xff);
7759 AssertRC(rc);
7760 }
7761
7762 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7763 Assert(pVmxTransient->cbInstr == 2);
7764 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7765 return VINF_SUCCESS;
7766 }
7767
7768 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
7769 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7770 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7771 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7772 AssertRCReturn(rc, rc);
7773 Log(("ecx=%#RX32\n", pMixedCtx->ecx));
7774
7775 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7776 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
7777 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7778
7779 if (RT_LIKELY(rc == VINF_SUCCESS))
7780 {
7781 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7782
7783 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7784 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
7785 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
7786 {
7787 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_APIC_STATE);
7788 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7789 }
7790 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
7791 {
7792 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
7793 AssertRCReturn(rc, rc);
7794 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
7795 }
7796 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7797 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7798
7799 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
7800 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)))
7801 {
7802 switch (pMixedCtx->ecx)
7803 {
7804 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
7805 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
7806 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
7807 case MSR_K8_FS_BASE: /* no break */
7808 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
7809 /* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
7810 }
7811 }
7812#ifdef VBOX_STRICT
7813 else
7814 {
7815 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7816 switch (pMixedCtx->ecx)
7817 {
7818 case MSR_IA32_SYSENTER_CS:
7819 case MSR_IA32_SYSENTER_EIP:
7820 case MSR_IA32_SYSENTER_ESP:
7821 case MSR_K8_FS_BASE:
7822 case MSR_K8_GS_BASE:
7823 {
7824 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
7825 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7826 }
7827
7828 case MSR_K8_LSTAR:
7829 case MSR_K6_STAR:
7830 case MSR_K8_SF_MASK:
7831 case MSR_K8_TSC_AUX:
7832 case MSR_K8_KERNEL_GS_BASE:
7833 {
7834 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7835 pMixedCtx->ecx));
7836 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7837 }
7838 }
7839 }
7840#endif /* VBOX_STRICT */
7841 }
7842 return rc;
7843}
7844
7845
7846/**
7847 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7848 */
7849HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7850{
7851 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7852 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
7853 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
7854 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
7855 return VERR_EM_INTERPRETER;
7856 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7857 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7858}
7859
7860
7861/**
7862 * VM-exit handler for when the TPR value is lowered below the specified
7863 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7864 */
7865HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7866{
7867 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7868 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
7869
7870 /*
7871 * The TPR has already been updated, see hmR0VMXPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
7872 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
7873 * resume guest execution.
7874 */
7875 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7876 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
7877 return VINF_SUCCESS;
7878}
7879
7880
7881/**
7882 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7883 * VM-exit.
7884 *
7885 * @retval VINF_SUCCESS when guest execution can continue.
7886 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
7887 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7888 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
7889 * recompiler.
7890 */
7891HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7892{
7893 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7894 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
7895 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7896 AssertRCReturn(rc, rc);
7897
7898 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
7899 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
7900 PVM pVM = pVCpu->CTX_SUFF(pVM);
7901 switch (uAccessType)
7902 {
7903 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
7904 {
7905#if 0
7906 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
7907 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7908#else
7909 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7910 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7911 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7912#endif
7913 AssertRCReturn(rc, rc);
7914
7915 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7916 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
7917 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
7918 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
7919
7920 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
7921 {
7922 case 0: /* CR0 */
7923 Log(("CRX CR0 write rc=%d CR0=%#RGv\n", rc, pMixedCtx->cr0));
7924 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7925 break;
7926 case 2: /* C2 **/
7927 /* Nothing to do here, CR2 it's not part of the VMCS. */
7928 break;
7929 case 3: /* CR3 */
7930 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
7931 Log(("CRX CR3 write rc=%d CR3=%#RGv\n", rc, pMixedCtx->cr3));
7932 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
7933 break;
7934 case 4: /* CR4 */
7935 Log(("CRX CR4 write rc=%d CR4=%#RGv\n", rc, pMixedCtx->cr4));
7936 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
7937 break;
7938 case 8: /* CR8 */
7939 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
7940 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
7941 /* We don't need to update HM_CHANGED_VMX_GUEST_APIC_STATE here as this -cannot- happen with TPR shadowing. */
7942 break;
7943 default:
7944 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
7945 break;
7946 }
7947
7948 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7949 break;
7950 }
7951
7952 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
7953 {
7954 /* EMInterpretCRxRead() requires EFER MSR, CS. */
7955 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7956 AssertRCReturn(rc, rc);
7957 Assert( !pVM->hm.s.fNestedPaging
7958 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
7959 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
7960
7961 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
7962 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
7963 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
7964
7965 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7966 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
7967 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
7968 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
7969 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7970 Log(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
7971 break;
7972 }
7973
7974 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
7975 {
7976 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7977 AssertRCReturn(rc, rc);
7978 rc = EMInterpretCLTS(pVM, pVCpu);
7979 AssertRCReturn(rc, rc);
7980 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7981 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
7982 Log(("CRX CLTS write rc=%d\n", rc));
7983 break;
7984 }
7985
7986 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
7987 {
7988 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7989 AssertRCReturn(rc, rc);
7990 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
7991 if (RT_LIKELY(rc == VINF_SUCCESS))
7992 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7993 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
7994 Log(("CRX LMSW write rc=%d\n", rc));
7995 break;
7996 }
7997
7998 default:
7999 {
8000 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
8001 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
8002 }
8003 }
8004
8005 /* Validate possible error codes. */
8006 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
8007 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
8008 if (RT_SUCCESS(rc))
8009 {
8010 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8011 AssertRCReturn(rc2, rc2);
8012 }
8013
8014 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
8015 return rc;
8016}
8017
8018
8019/**
8020 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
8021 * VM-exit.
8022 */
8023HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8024{
8025 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8026 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
8027
8028 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8029 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8030 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8031 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
8032 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
8033 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
8034 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
8035 AssertRCReturn(rc, rc);
8036 Log(("CS:RIP=%04x:%#RGv\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8037
8038 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
8039 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
8040 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
8041 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
8042 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
8043 bool fIOString = (VMX_EXIT_QUALIFICATION_IO_STRING(pVmxTransient->uExitQualification) == 1);
8044 Assert(uIOWidth == 0 || uIOWidth == 1 || uIOWidth == 3);
8045
8046 /* I/O operation lookup arrays. */
8047 static const uint32_t s_aIOSize[4] = { 1, 2, 0, 4 }; /* Size of the I/O Accesses. */
8048 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
8049
8050 const uint32_t cbSize = s_aIOSize[uIOWidth];
8051 const uint32_t cbInstr = pVmxTransient->cbInstr;
8052 PVM pVM = pVCpu->CTX_SUFF(pVM);
8053 if (fIOString)
8054 {
8055 /* INS/OUTS - I/O String instruction. */
8056 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8057 /** @todo for now manually disassemble later optimize by getting the fields from
8058 * the VMCS. */
8059 /** @todo VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
8060 * operand of the instruction. VMX_VMCS32_RO_EXIT_INSTR_INFO contains
8061 * segment prefix info. */
8062 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
8063 if (RT_SUCCESS(rc))
8064 {
8065 if (fIOWrite)
8066 {
8067 VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
8068 (DISCPUMODE)pDis->uAddrMode, cbSize);
8069 rc = VBOXSTRICTRC_VAL(rc2);
8070 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
8071 }
8072 else
8073 {
8074 VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
8075 (DISCPUMODE)pDis->uAddrMode, cbSize);
8076 rc = VBOXSTRICTRC_VAL(rc2);
8077 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
8078 }
8079 }
8080 else
8081 {
8082 AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
8083 rc = VINF_EM_RAW_EMULATE_INSTR;
8084 }
8085 }
8086 else
8087 {
8088 /* IN/OUT - I/O instruction. */
8089 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
8090 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(pVmxTransient->uExitQualification));
8091 if (fIOWrite)
8092 {
8093 VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbSize);
8094 rc = VBOXSTRICTRC_VAL(rc2);
8095 if (rc == VINF_IOM_R3_IOPORT_WRITE)
8096 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8097 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
8098 }
8099 else
8100 {
8101 uint32_t u32Result = 0;
8102 VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbSize);
8103 rc = VBOXSTRICTRC_VAL(rc2);
8104 if (IOM_SUCCESS(rc))
8105 {
8106 /* Save result of I/O IN instr. in AL/AX/EAX. */
8107 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8108 }
8109 else if (rc == VINF_IOM_R3_IOPORT_READ)
8110 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8111 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
8112 }
8113 }
8114
8115 if (IOM_SUCCESS(rc))
8116 {
8117 pMixedCtx->rip += cbInstr;
8118 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8119 if (RT_LIKELY(rc == VINF_SUCCESS))
8120 {
8121 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); /* For DR7. */
8122 AssertRCReturn(rc, rc);
8123
8124 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
8125 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
8126 {
8127 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
8128 for (unsigned i = 0; i < 4; i++)
8129 {
8130 uint32_t uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
8131 if ( ( uIOPort >= pMixedCtx->dr[i]
8132 && uIOPort < pMixedCtx->dr[i] + uBPLen)
8133 && (pMixedCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
8134 && (pMixedCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
8135 {
8136 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8137 uint64_t uDR6 = ASMGetDR6();
8138
8139 /* Clear all breakpoint status flags and set the one we just hit. */
8140 uDR6 &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
8141 uDR6 |= (uint64_t)RT_BIT(i);
8142
8143 /*
8144 * Note: AMD64 Architecture Programmer's Manual 13.1:
8145 * Bits 15:13 of the DR6 register is never cleared by the processor and must
8146 * be cleared by software after the contents have been read.
8147 */
8148 ASMSetDR6(uDR6);
8149
8150 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8151 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8152
8153 /* Paranoia. */
8154 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits reserved. */
8155 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
8156 pMixedCtx->dr[7] |= 0x400; /* MB1. */
8157
8158 /* Resync DR7 */
8159 /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
8160 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8161
8162 /* Set #DB to be injected into the VM and continue guest execution. */
8163 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
8164 break;
8165 }
8166 }
8167 }
8168 }
8169 }
8170
8171#ifdef DEBUG
8172 if (rc == VINF_IOM_R3_IOPORT_READ)
8173 Assert(!fIOWrite);
8174 else if (rc == VINF_IOM_R3_IOPORT_WRITE)
8175 Assert(fIOWrite);
8176 else
8177 {
8178 AssertMsg( RT_FAILURE(rc)
8179 || rc == VINF_SUCCESS
8180 || rc == VINF_EM_RAW_EMULATE_INSTR
8181 || rc == VINF_EM_RAW_GUEST_TRAP
8182 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
8183 }
8184#endif
8185
8186 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
8187 return rc;
8188}
8189
8190
8191/**
8192 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8193 * VM-exit.
8194 */
8195HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8196{
8197 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8198
8199 /* Check if this task-switch occurred while delivery an event through the guest IDT. */
8200 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8201 AssertRCReturn(rc, rc);
8202 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
8203 {
8204 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
8205 AssertRCReturn(rc, rc);
8206 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
8207 {
8208 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
8209 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
8210 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
8211 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
8212 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
8213 {
8214 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
8215 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
8216
8217 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
8218 Assert(!pVCpu->hm.s.Event.fPending);
8219 pVCpu->hm.s.Event.fPending = true;
8220 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
8221 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
8222 AssertRCReturn(rc, rc);
8223 if (fErrorCodeValid)
8224 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
8225 else
8226 pVCpu->hm.s.Event.u32ErrCode = 0;
8227 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
8228 && uVector == X86_XCPT_PF)
8229 {
8230 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
8231 }
8232 Log(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
8233 }
8234 }
8235 }
8236 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8237 * emulation. */
8238 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8239 return VERR_EM_INTERPRETER;
8240}
8241
8242
8243/**
8244 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8245 */
8246HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8247{
8248 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8249 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
8250 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
8251 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8252 AssertRCReturn(rc, rc);
8253 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
8254 return VINF_EM_DBG_STOP;
8255}
8256
8257
8258/**
8259 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8260 */
8261HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8262{
8263 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8264
8265 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8266 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8267 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8268 return VINF_SUCCESS;
8269 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8270 return rc;
8271
8272#if 0
8273 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
8274 * just sync the whole thing. */
8275 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8276#else
8277 /* Aggressive state sync. for now. */
8278 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8279 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8280 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8281#endif
8282 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8283 AssertRCReturn(rc, rc);
8284
8285 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Phyiscal Addresses" */
8286 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
8287 switch (uAccessType)
8288 {
8289 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8290 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8291 {
8292 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8293 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
8294 {
8295 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8296 }
8297
8298 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
8299 GCPhys &= PAGE_BASE_GC_MASK;
8300 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
8301 PVM pVM = pVCpu->CTX_SUFF(pVM);
8302 Log(("ApicAccess uAccessType=%#x GCPhys=%RGp Off=%#x\n", uAccessType, GCPhys,
8303 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
8304
8305 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
8306 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
8307 CPUMCTX2CORE(pMixedCtx), GCPhys);
8308 rc = VBOXSTRICTRC_VAL(rc2);
8309 Log(("ApicAccess rc=%d\n", rc));
8310 if ( rc == VINF_SUCCESS
8311 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8312 || rc == VERR_PAGE_NOT_PRESENT)
8313 {
8314 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8315 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8316 rc = VINF_SUCCESS;
8317 }
8318 break;
8319 }
8320
8321 default:
8322 Log(("ApicAccess uAccessType=%#x\n", uAccessType));
8323 rc = VINF_EM_RAW_EMULATE_INSTR;
8324 break;
8325 }
8326
8327 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
8328 return rc;
8329}
8330
8331
8332/**
8333 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8334 * VM-exit.
8335 */
8336HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8337{
8338 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8339
8340 /* We should -not- get this VM-exit if the guest is debugging. */
8341 if (CPUMIsGuestDebugStateActive(pVCpu))
8342 {
8343 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8344 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8345 }
8346
8347 int rc = VERR_INTERNAL_ERROR_5;
8348 if ( !DBGFIsStepping(pVCpu)
8349 && !CPUMIsHyperDebugStateActive(pVCpu))
8350 {
8351 /* Don't intercept MOV DRx. */
8352 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
8353 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8354 AssertRCReturn(rc, rc);
8355
8356 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8357 PVM pVM = pVCpu->CTX_SUFF(pVM);
8358 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
8359 AssertRC(rc);
8360 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8361
8362#ifdef VBOX_WITH_STATISTICS
8363 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8364 AssertRCReturn(rc, rc);
8365 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8366 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8367 else
8368 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8369#endif
8370 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
8371 return VINF_SUCCESS;
8372 }
8373
8374 /*
8375 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
8376 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
8377 */
8378 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8379 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8380 AssertRCReturn(rc, rc);
8381
8382 PVM pVM = pVCpu->CTX_SUFF(pVM);
8383 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8384 {
8385 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8386 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
8387 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
8388 if (RT_SUCCESS(rc))
8389 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8390 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8391 }
8392 else
8393 {
8394 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8395 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
8396 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
8397 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8398 }
8399
8400 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8401 if (RT_SUCCESS(rc))
8402 {
8403 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8404 AssertRCReturn(rc2, rc2);
8405 }
8406 return rc;
8407}
8408
8409
8410/**
8411 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8412 * Conditional VM-exit.
8413 */
8414HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8415{
8416 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8417 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8418
8419 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8420 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8421 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8422 return VINF_SUCCESS;
8423 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8424 return rc;
8425
8426 RTGCPHYS GCPhys = 0;
8427 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8428
8429#if 0
8430 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8431#else
8432 /* Aggressive state sync. for now. */
8433 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8434 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8435 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8436#endif
8437 AssertRCReturn(rc, rc);
8438
8439 /*
8440 * If we succeed, resume guest execution.
8441 * If we fail in interpreting the instruction because we couldn't get the guest physical address
8442 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
8443 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
8444 * weird case. See @bugref{6043}.
8445 */
8446 PVM pVM = pVCpu->CTX_SUFF(pVM);
8447 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
8448 rc = VBOXSTRICTRC_VAL(rc2);
8449 Log(("EPT misconfig at %#RGv RIP=%#RGv rc=%d\n", GCPhys, pMixedCtx->rip, rc));
8450 if ( rc == VINF_SUCCESS
8451 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8452 || rc == VERR_PAGE_NOT_PRESENT)
8453 {
8454 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8455 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8456 return VINF_SUCCESS;
8457 }
8458 return rc;
8459}
8460
8461
8462/**
8463 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8464 * VM-exit.
8465 */
8466HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8467{
8468 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8469 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8470
8471 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8472 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8473 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8474 return VINF_SUCCESS;
8475 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8476 return rc;
8477
8478 RTGCPHYS GCPhys = 0;
8479 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8480 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8481#if 0
8482 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8483#else
8484 /* Aggressive state sync. for now. */
8485 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8486 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8487 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8488#endif
8489 AssertRCReturn(rc, rc);
8490
8491 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
8492 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RGv", pVmxTransient->uExitQualification));
8493
8494 RTGCUINT uErrorCode = 0;
8495 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
8496 uErrorCode |= X86_TRAP_PF_ID;
8497 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
8498 uErrorCode |= X86_TRAP_PF_RW;
8499 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
8500 uErrorCode |= X86_TRAP_PF_P;
8501
8502 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8503
8504 Log(("EPT violation %#x at %#RGv ErrorCode %#x CS:EIP=%04x:%#RX64\n", (uint32_t)pVmxTransient->uExitQualification, GCPhys,
8505 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8506
8507 /* Handle the pagefault trap for the nested shadow table. */
8508 PVM pVM = pVCpu->CTX_SUFF(pVM);
8509 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
8510 TRPMResetTrap(pVCpu);
8511
8512 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8513 if ( rc == VINF_SUCCESS
8514 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8515 || rc == VERR_PAGE_NOT_PRESENT)
8516 {
8517 /* Successfully synced our shadow page tables or emulation MMIO instruction. */
8518 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
8519 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8520 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8521 return VINF_SUCCESS;
8522 }
8523
8524 Log(("EPT return to ring-3 rc=%d\n"));
8525 return rc;
8526}
8527
8528
8529/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8530/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
8531/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8532/**
8533 * VM-exit exception handler for #MF (Math Fault: floating point exception).
8534 */
8535static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8536{
8537 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8538 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8539
8540 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8541 AssertRCReturn(rc, rc);
8542
8543 if (!(pMixedCtx->cr0 & X86_CR0_NE))
8544 {
8545 /* Old-style FPU error reporting needs some extra work. */
8546 /** @todo don't fall back to the recompiler, but do it manually. */
8547 return VERR_EM_INTERPRETER;
8548 }
8549 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8550 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8551 return rc;
8552}
8553
8554
8555/**
8556 * VM-exit exception handler for #BP (Breakpoint exception).
8557 */
8558static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8559{
8560 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8561 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8562
8563 /** @todo Try optimize this by not saving the entire guest state unless
8564 * really needed. */
8565 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8566 AssertRCReturn(rc, rc);
8567
8568 PVM pVM = pVCpu->CTX_SUFF(pVM);
8569 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8570 if (rc == VINF_EM_RAW_GUEST_TRAP)
8571 {
8572 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8573 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8574 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8575 AssertRCReturn(rc, rc);
8576
8577 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8578 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8579 }
8580
8581 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
8582 return rc;
8583}
8584
8585
8586/**
8587 * VM-exit exception handler for #DB (Debug exception).
8588 */
8589static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8590{
8591 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8592 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8593
8594 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8595 AssertRCReturn(rc, rc);
8596
8597 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
8598 uint64_t uDR6 = X86_DR6_INIT_VAL;
8599 uDR6 |= (pVmxTransient->uExitQualification
8600 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
8601 PVM pVM = pVCpu->CTX_SUFF(pVM);
8602 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
8603 if (rc == VINF_EM_RAW_GUEST_TRAP)
8604 {
8605 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
8606 pMixedCtx->dr[6] = uDR6;
8607
8608 if (CPUMIsGuestDebugStateActive(pVCpu))
8609 ASMSetDR6(pMixedCtx->dr[6]);
8610
8611 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
8612
8613 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8614 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8615
8616 /* Paranoia. */
8617 pMixedCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
8618 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
8619 pMixedCtx->dr[7] |= 0x400; /* must be one */
8620
8621 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
8622 AssertRCReturn(rc,rc);
8623
8624 int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8625 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8626 rc2 |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8627 AssertRCReturn(rc2, rc2);
8628 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8629 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8630 rc = VINF_SUCCESS;
8631 }
8632
8633 return rc;
8634}
8635
8636
8637/**
8638 * VM-exit exception handler for #NM (Device-not-available exception: floating
8639 * point exception).
8640 */
8641static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8642{
8643 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8644
8645#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8646 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8647#endif
8648
8649 /* We require CR0 and EFER. EFER is always up-to-date. */
8650 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8651 AssertRCReturn(rc, rc);
8652
8653 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
8654 PVM pVM = pVCpu->CTX_SUFF(pVM);
8655 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8656 if (rc == VINF_SUCCESS)
8657 {
8658 Assert(CPUMIsGuestFPUStateActive(pVCpu));
8659 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8660 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
8661 return VINF_SUCCESS;
8662 }
8663
8664 /* Forward #NM to the guest. */
8665 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
8666 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8667 AssertRCReturn(rc, rc);
8668 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8669 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
8670 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
8671 return rc;
8672}
8673
8674
8675/**
8676 * VM-exit exception handler for #GP (General-protection exception).
8677 *
8678 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
8679 */
8680static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8681{
8682 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8683 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8684
8685 int rc = VERR_INTERNAL_ERROR_5;
8686 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8687 {
8688#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8689 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
8690 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8691 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8692 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8693 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8694 AssertRCReturn(rc, rc);
8695 Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RGv CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
8696 pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
8697 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8698 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8699 return rc;
8700#else
8701 /* We don't intercept #GP. */
8702 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
8703 return VERR_VMX_UNEXPECTED_EXCEPTION;
8704#endif
8705 }
8706
8707 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
8708 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
8709
8710 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
8711 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8712 AssertRCReturn(rc, rc);
8713
8714 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8715 uint32_t cbOp = 0;
8716 PVM pVM = pVCpu->CTX_SUFF(pVM);
8717 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
8718 if (RT_SUCCESS(rc))
8719 {
8720 rc = VINF_SUCCESS;
8721 Assert(cbOp == pDis->cbInstr);
8722 Log(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8723 switch (pDis->pCurInstr->uOpcode)
8724 {
8725 case OP_CLI:
8726 pMixedCtx->eflags.Bits.u1IF = 0;
8727 pMixedCtx->rip += pDis->cbInstr;
8728 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8729 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
8730 break;
8731
8732 case OP_STI:
8733 pMixedCtx->eflags.Bits.u1IF = 1;
8734 pMixedCtx->rip += pDis->cbInstr;
8735 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
8736 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
8737 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8738 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
8739 break;
8740
8741 case OP_HLT:
8742 rc = VINF_EM_HALT;
8743 pMixedCtx->rip += pDis->cbInstr;
8744 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8745 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8746 break;
8747
8748 case OP_POPF:
8749 {
8750 Log(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8751 uint32_t cbParm = 0;
8752 uint32_t uMask = 0;
8753 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8754 {
8755 cbParm = 4;
8756 uMask = 0xffffffff;
8757 }
8758 else
8759 {
8760 cbParm = 2;
8761 uMask = 0xffff;
8762 }
8763
8764 /* Get the stack pointer & pop the contents of the stack onto EFlags. */
8765 RTGCPTR GCPtrStack = 0;
8766 X86EFLAGS uEflags;
8767 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8768 &GCPtrStack);
8769 if (RT_SUCCESS(rc))
8770 {
8771 Assert(sizeof(uEflags.u32) >= cbParm);
8772 uEflags.u32 = 0;
8773 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
8774 }
8775 if (RT_FAILURE(rc))
8776 {
8777 rc = VERR_EM_INTERPRETER;
8778 break;
8779 }
8780 Log(("POPF %x -> %#RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
8781 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8782 | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
8783 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
8784 pMixedCtx->eflags.Bits.u1RF = 0;
8785 pMixedCtx->esp += cbParm;
8786 pMixedCtx->esp &= uMask;
8787 pMixedCtx->rip += pDis->cbInstr;
8788 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8789 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
8790 break;
8791 }
8792
8793 case OP_PUSHF:
8794 {
8795 uint32_t cbParm = 0;
8796 uint32_t uMask = 0;
8797 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8798 {
8799 cbParm = 4;
8800 uMask = 0xffffffff;
8801 }
8802 else
8803 {
8804 cbParm = 2;
8805 uMask = 0xffff;
8806 }
8807
8808 /* Get the stack pointer & push the contents of eflags onto the stack. */
8809 RTGCPTR GCPtrStack = 0;
8810 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
8811 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
8812 if (RT_FAILURE(rc))
8813 {
8814 rc = VERR_EM_INTERPRETER;
8815 break;
8816 }
8817 X86EFLAGS uEflags;
8818 uEflags = pMixedCtx->eflags;
8819 /* The RF & VM bits are cleared on image stored on stack; see Intel Instruction reference for PUSHF. */
8820 uEflags.Bits.u1RF = 0;
8821 uEflags.Bits.u1VM = 0;
8822
8823 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
8824 if (RT_FAILURE(rc))
8825 {
8826 rc = VERR_EM_INTERPRETER;
8827 break;
8828 }
8829 Log(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
8830 pMixedCtx->esp -= cbParm;
8831 pMixedCtx->esp &= uMask;
8832 pMixedCtx->rip += pDis->cbInstr;
8833 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
8834 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
8835 break;
8836 }
8837
8838 case OP_IRET:
8839 {
8840 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
8841 * instruction reference. */
8842 RTGCPTR GCPtrStack = 0;
8843 uint32_t uMask = 0xffff;
8844 uint16_t aIretFrame[3];
8845 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
8846 {
8847 rc = VERR_EM_INTERPRETER;
8848 break;
8849 }
8850 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8851 &GCPtrStack);
8852 if (RT_SUCCESS(rc))
8853 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
8854 if (RT_FAILURE(rc))
8855 {
8856 rc = VERR_EM_INTERPRETER;
8857 break;
8858 }
8859 pMixedCtx->eip = 0;
8860 pMixedCtx->ip = aIretFrame[0];
8861 pMixedCtx->cs.Sel = aIretFrame[1];
8862 pMixedCtx->cs.ValidSel = aIretFrame[1];
8863 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
8864 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8865 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
8866 pMixedCtx->sp += sizeof(aIretFrame);
8867 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
8868 | HM_CHANGED_GUEST_RFLAGS;
8869 Log(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
8870 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
8871 break;
8872 }
8873
8874 case OP_INT:
8875 {
8876 uint16_t uVector = pDis->Param1.uValue & 0xff;
8877 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
8878 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8879 break;
8880 }
8881
8882 case OP_INTO:
8883 {
8884 if (pMixedCtx->eflags.Bits.u1OF)
8885 {
8886 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
8887 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8888 }
8889 break;
8890 }
8891
8892 default:
8893 {
8894 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
8895 EMCODETYPE_SUPERVISOR);
8896 rc = VBOXSTRICTRC_VAL(rc2);
8897 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
8898 Log(("#GP rc=%Rrc\n", rc));
8899 break;
8900 }
8901 }
8902 }
8903 else
8904 rc = VERR_EM_INTERPRETER;
8905
8906 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
8907 ("#GP Unexpected rc=%Rrc\n", rc));
8908 return rc;
8909}
8910
8911
8912/**
8913 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
8914 * the exception reported in the VMX transient structure back into the VM.
8915 *
8916 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
8917 * up-to-date.
8918 */
8919static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8920{
8921 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8922
8923 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
8924 hmR0VmxCheckExitDueToEventDelivery(). */
8925 int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8926 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8927 AssertRCReturn(rc, rc);
8928 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
8929 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8930 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8931 return VINF_SUCCESS;
8932}
8933
8934
8935/**
8936 * VM-exit exception handler for #PF (Page-fault exception).
8937 */
8938static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8939{
8940 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8941 PVM pVM = pVCpu->CTX_SUFF(pVM);
8942 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8943 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8944 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8945 AssertRCReturn(rc, rc);
8946
8947#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
8948 if (pVM->hm.s.fNestedPaging)
8949 {
8950 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
8951 {
8952 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
8953 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
8954 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8955 0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
8956 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8957 }
8958 else
8959 {
8960 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8961 pVCpu->hm.s.Event.fPending = false; /* A vectoring #PF. */
8962 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
8963 Log(("Pending #DF due to vectoring #PF. NP\n"));
8964 }
8965 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8966 return rc;
8967 }
8968#else
8969 Assert(!pVM->hm.s.fNestedPaging);
8970#endif
8971
8972#ifdef VBOX_HM_WITH_GUEST_PATCHING
8973 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8974 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8975 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8976 AssertRCReturn(rc, rc);
8977 /* Shortcut for APIC TPR access, only for 32-bit guests. */
8978 if ( pVM->hm.s.fTRPPatchingAllowed
8979 && pVM->hm.s.pGuestPatchMem
8980 && (pVmxTransient->uExitQualification & 0xfff) == 0x80 /* TPR offset */
8981 && !(pVmxTransient->uExitIntrErrorCode & X86_TRAP_PF_P) /* Page not present */
8982 && CPUMGetGuestCPL(pVCpu) == 0 /* Requires CR0, EFLAGS, segments. */
8983 && !CPUMIsGuestInLongModeEx(pMixedCtx) /* Requires EFER. */
8984 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
8985 {
8986 RTGCPHYS GCPhys;
8987 RTGCPHYS GCPhysApicBase = (pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK);
8988 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pVmxTransient->uExitQualification, NULL /* pfFlags */, &GCPhys);
8989 if ( rc == VINF_SUCCESS
8990 && GCPhys == GCPhysApicBase)
8991 {
8992 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8993 AssertRCReturn(rc, rc);
8994
8995 /* Only attempt to patch the instruction once. */
8996 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pMixedCtx->eip);
8997 if (!pPatch)
8998 return VINF_EM_HM_PATCH_TPR_INSTR;
8999 }
9000 }
9001#endif
9002
9003 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9004 AssertRCReturn(rc, rc);
9005
9006 Log(("#PF: cr2=%#RGv cs:rip=%#04x:%#RGv uErrCode %#RX32 cr3=%#RGv\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
9007 pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
9008
9009 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
9010 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
9011 (RTGCPTR)pVmxTransient->uExitQualification);
9012
9013 Log(("#PF: rc=%Rrc\n", rc));
9014 if (rc == VINF_SUCCESS)
9015 {
9016 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
9017 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
9018 * memory? We don't update the whole state here... */
9019 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9020 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9021 TRPMResetTrap(pVCpu);
9022 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
9023 return rc;
9024 }
9025 else if (rc == VINF_EM_RAW_GUEST_TRAP)
9026 {
9027 if (!pVmxTransient->fVectoringPF)
9028 {
9029 /* It's a guest page fault and needs to be reflected to the guest. */
9030 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
9031 TRPMResetTrap(pVCpu);
9032 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
9033 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
9034 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
9035 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
9036 }
9037 else
9038 {
9039 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
9040 TRPMResetTrap(pVCpu);
9041 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF for replace it with #DF. */
9042 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
9043 Log(("#PF: Pending #DF due to vectoring #PF\n"));
9044 }
9045
9046 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
9047 return VINF_SUCCESS;
9048 }
9049
9050 TRPMResetTrap(pVCpu);
9051 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
9052 return rc;
9053}
9054
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette