VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@ 45888

Last change on this file since 45888 was 45888, checked in by vboxsync, 12 years ago

VMM/HMVMXR0: Avoid syncing TRPM and Pending HM events back and forth when teasing to enter VT-x but we keep returning to ring-3 for one reason or another. Now do the actual conversion later during event injection when, currently, it's too late to go back to ring-3.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 375.9 KB
1/* $Id: HMVMXR0.cpp 45888 2013-05-03 09:30:33Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HWVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#ifdef DEBUG_ramshankar
38#define HMVMX_SAVE_FULL_GUEST_STATE
39#define HMVMX_SYNC_FULL_GUEST_STATE
40#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
41#define HMVMX_ALWAYS_TRAP_PF
42#endif
43
44
45/*******************************************************************************
46* Defined Constants And Macros *
47*******************************************************************************/
48#define HMVMXHCUINTREG RTHCUINTREG
49#if defined(RT_ARCH_AMD64)
50# define HMVMX_IS_64BIT_HOST_MODE() (true)
51#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
52extern "C" uint32_t g_fVMXIs64bitHost;
53# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
54# undef HMVMXHCUINTREG
55# define HMVMXHCUINTREG uint64_t
56#else
57# define HMVMX_IS_64BIT_HOST_MODE() (false)
58#endif
59
60/** Use the function table. */
61#define HMVMX_USE_FUNCTION_TABLE
62
63/** This bit indicates the segment selector is unusable in VT-x. */
64#define HMVMX_SEL_UNUSABLE RT_BIT(16)
65
66/** Determine which tagged-TLB flush handler to use. */
67#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
68#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
69#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
70#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
71
72/** Updated-guest-state flags. */
73#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
74#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
75#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
76#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
77#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
78#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
79#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
80#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
81#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
82#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
83#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
84#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
85#define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12)
86#define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13)
87#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14)
88#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15)
89#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16)
90#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
91#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18)
92#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
93#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
94 | HMVMX_UPDATED_GUEST_RSP \
95 | HMVMX_UPDATED_GUEST_RFLAGS \
96 | HMVMX_UPDATED_GUEST_CR0 \
97 | HMVMX_UPDATED_GUEST_CR3 \
98 | HMVMX_UPDATED_GUEST_CR4 \
99 | HMVMX_UPDATED_GUEST_GDTR \
100 | HMVMX_UPDATED_GUEST_IDTR \
101 | HMVMX_UPDATED_GUEST_LDTR \
102 | HMVMX_UPDATED_GUEST_TR \
103 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
104 | HMVMX_UPDATED_GUEST_DEBUG \
105 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \
106 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \
107 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
108 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
109 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
110 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
111 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
112 | HMVMX_UPDATED_GUEST_APIC_STATE)
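/*
 * Illustrative sketch, not part of the original file: callers typically test one of the
 * HMVMX_UPDATED_GUEST_* bits before re-reading the corresponding register from the VMCS,
 * roughly:
 *
 *     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
 *     {
 *         // read VMX_VMCS_GUEST_RIP into pMixedCtx->rip, then mark it as updated
 *         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
 *     }
 *
 * The tracking field name (pVCpu->hm.s.vmx.fUpdatedGuestState) is assumed from its use
 * elsewhere in this file.
 */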
113
114/**
115 * Flags to skip redundant reads of some common VMCS fields that are not part of
116 * the guest-CPU state but are in the transient structure.
117 */
118#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
119#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
120#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
121#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
122#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
123#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
124
125/**
126 * Exception bitmap mask for real-mode guests (real-on-v86). We need to intercept all exceptions manually (except #PF).
127 * #NM is also handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be intercepted even in real-mode if
128 * we have Nested Paging support.
129 */
130#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
131 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
132 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
133 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
134 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
135 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
136 | RT_BIT(X86_XCPT_XF))
137
138/**
139 * Exception bitmap mask for all contributory exceptions.
140 */
141#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
142 | RT_BIT(X86_XCPT_DE))
143
144/** Maximum VM-instruction error number. */
145#define HMVMX_INSTR_ERROR_MAX 28
146
147/** Profiling macro. */
148#ifdef HM_PROFILE_EXIT_DISPATCH
149# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
150# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
151#else
152# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
153# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
154#endif
155
156
157/*******************************************************************************
158* Structures and Typedefs *
159*******************************************************************************/
160/**
161 * A state structure for holding miscellaneous information across
162 * VMX non-root operation, restored after the transition.
163 */
164typedef struct VMXTRANSIENT
165{
166 /** The host's rflags/eflags. */
167 RTCCUINTREG uEFlags;
168#if HC_ARCH_BITS == 32
169 uint32_t u32Alignment0;
170#endif
171 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
172 uint64_t u64LStarMsr;
173 /** The guest's TPR value used for TPR shadowing. */
174 uint8_t u8GuestTpr;
175 /** Alignment. */
176 uint8_t abAlignment0[6];
177
178 /** The basic VM-exit reason. */
179 uint16_t uExitReason;
180 /** Alignment. */
181 uint16_t u16Alignment0;
182 /** The VM-exit interruption error code. */
183 uint32_t uExitIntrErrorCode;
184 /** The VM-exit exit qualification. */
185 RTGCUINTPTR uExitQualification;
186#if GC_ARCH_BITS == 32
187 /** Alignment. */
188 uint32_t u32Alignment1;
189#endif
190
191 /** The VM-exit interruption-information field. */
192 uint32_t uExitIntrInfo;
193 /** The VM-exit instruction-length field. */
194 uint32_t cbInstr;
195 /** Whether the VM-entry failed or not. */
196 bool fVMEntryFailed;
197 /** Alignment. */
198 uint8_t abAlignment1[5];
199
200 /** The VM-entry interruption-information field. */
201 uint32_t uEntryIntrInfo;
202 /** The VM-entry exception error code field. */
203 uint32_t uEntryXcptErrorCode;
204 /** The VM-entry instruction length field. */
205 uint32_t cbEntryInstr;
206
207 /** IDT-vectoring information field. */
208 uint32_t uIdtVectoringInfo;
209 /** IDT-vectoring error code. */
210 uint32_t uIdtVectoringErrorCode;
211
212 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
213 uint32_t fVmcsFieldsRead;
214 /** Whether TSC-offsetting should be set up before VM-entry. */
215 bool fUpdateTscOffsettingAndPreemptTimer;
216 /** Whether the VM-exit was caused by a page-fault during delivery of a
217 * contributory exception or a page-fault. */
218 bool fVectoringPF;
219} VMXTRANSIENT, *PVMXTRANSIENT;
220AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
221AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
222AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
223
224
225/**
226 * MSR-bitmap read permissions.
227 */
228typedef enum VMXMSREXITREAD
229{
230 /** Reading this MSR causes a VM-exit. */
231 VMXMSREXIT_INTERCEPT_READ = 0xb,
232 /** Reading this MSR does not cause a VM-exit. */
233 VMXMSREXIT_PASSTHRU_READ
234} VMXMSREXITREAD;
235
236/**
237 * MSR-bitmap write permissions.
238 */
239typedef enum VMXMSREXITWRITE
240{
241 /** Writing to this MSR causes a VM-exit. */
242 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
243 /** Writing to this MSR does not cause a VM-exit. */
244 VMXMSREXIT_PASSTHRU_WRITE
245} VMXMSREXITWRITE;
246
247
248/*******************************************************************************
249* Internal Functions *
250*******************************************************************************/
251static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
252static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
253 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
254#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
255static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
256#endif
257#ifndef HMVMX_USE_FUNCTION_TABLE
258DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
259#define HMVMX_EXIT_DECL static int
260#else
261#define HMVMX_EXIT_DECL static DECLCALLBACK(int)
262#endif
263
264HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
265HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
266HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
267HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
268HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
269HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
270HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
271HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
272HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
273HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
274HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
275HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
276HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
277HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
278HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
279HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
280HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
281HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
282HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
283HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
284HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
285HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
286HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
287HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
288HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
289HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
290HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
291HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
292HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
293HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
294HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
295HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
296HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
297HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
298HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
300HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
301HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
302HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
303HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
304HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
305HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
306HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
307HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
308
309static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
310static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
311static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
312static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
313static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
314static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
315static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
316
317
318/*******************************************************************************
319* Global Variables *
320*******************************************************************************/
321#ifdef HMVMX_USE_FUNCTION_TABLE
322/**
323 * VM-exit handler.
324 *
325 * @returns VBox status code.
326 * @param pVCpu Pointer to the VMCPU.
327 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
328 * out-of-sync. Make sure to update the required
329 * fields before using them.
330 * @param pVmxTransient Pointer to the VMX-transient structure.
331 */
332typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
333/** Pointer to VM-exit handler. */
334typedef FNVMEXITHANDLER *const PFNVMEXITHANDLER;
335
336/**
337 * VMX_EXIT dispatch table.
338 */
339static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
340{
341 /* 00 VMX_EXIT_XCPT_NMI */ hmR0VmxExitXcptNmi,
342 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
343 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
344 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
345 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
346 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
347 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
348 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
349 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
350 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
351 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
352 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
353 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
354 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
355 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
356 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
357 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
358 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
359 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitSetPendingXcptUD,
360 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
361 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
362 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
363 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
364 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
365 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
366 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
367 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
368 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
369 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
370 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
371 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
372 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
373 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
374 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
375 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
376 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
377 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
378 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
379 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
380 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
381 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
382 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
383 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
384 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
385 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
386 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
387 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
388 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
389 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
390 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
391 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
392 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
393 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
394 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
395 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
396 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
397 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
398 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
399 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
400 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
401};
402#endif /* HMVMX_USE_FUNCTION_TABLE */
403
404#ifdef VBOX_STRICT
405static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
406{
407 /* 0 */ "(Not Used)",
408 /* 1 */ "VMCALL executed in VMX root operation.",
409 /* 2 */ "VMCLEAR with invalid physical address.",
410 /* 3 */ "VMCLEAR with VMXON pointer.",
411 /* 4 */ "VMLAUNCH with non-clear VMCS.",
412 /* 5 */ "VMRESUME with non-launched VMCS.",
413 /* 6 */ "VMRESUME after VMXOFF.",
414 /* 7 */ "VM entry with invalid control fields.",
415 /* 8 */ "VM entry with invalid host state fields.",
416 /* 9 */ "VMPTRLD with invalid physical address.",
417 /* 10 */ "VMPTRLD with VMXON pointer.",
418 /* 11 */ "VMPTRLD with incorrect revision identifier.",
419 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
420 /* 13 */ "VMWRITE to read-only VMCS component.",
421 /* 14 */ "(Not Used)",
422 /* 15 */ "VMXON executed in VMX root operation.",
423 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
424 /* 17 */ "VM entry with non-launched executive VMCS.",
425 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
426 /* 19 */ "VMCALL with non-clear VMCS.",
427 /* 20 */ "VMCALL with invalid VM-exit control fields.",
428 /* 21 */ "(Not Used)",
429 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
430 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
431 /* 24 */ "VMCALL with invalid SMM-monitor features.",
432 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
433 /* 26 */ "VM entry with events blocked by MOV SS.",
434 /* 27 */ "(Not Used)",
435 /* 28 */ "Invalid operand to INVEPT/INVVPID."
436};
437#endif /* VBOX_STRICT */
438
439
440
441/**
442 * Updates the VM's last error record. If there was a VMX instruction error,
443 * reads the error data from the VMCS and updates VCPU's last error record as
444 * well.
445 *
446 * @param pVM Pointer to the VM.
447 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
448 * VERR_VMX_UNABLE_TO_START_VM or
449 * VERR_VMX_INVALID_VMCS_FIELD).
450 * @param rc The error code.
451 */
452static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
453{
454 AssertPtr(pVM);
455 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
456 || rc == VERR_VMX_UNABLE_TO_START_VM)
457 {
458 AssertPtrReturnVoid(pVCpu);
459 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
460 }
461 pVM->hm.s.lLastError = rc;
462}
463
464
465/**
466 * Reads the VM-entry interruption-information field from the VMCS into the VMX
467 * transient structure.
468 *
469 * @returns VBox status code.
470 * @param pVmxTransient Pointer to the VMX transient structure.
471 *
472 * @remarks No-long-jump zone!!!
473 */
474DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
475{
476 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
477 AssertRCReturn(rc, rc);
478 return VINF_SUCCESS;
479}
480
481
482/**
483 * Reads the VM-entry exception error code field from the VMCS into
484 * the VMX transient structure.
485 *
486 * @returns VBox status code.
487 * @param pVmxTransient Pointer to the VMX transient structure.
488 *
489 * @remarks No-long-jump zone!!!
490 */
491DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
492{
493 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
494 AssertRCReturn(rc, rc);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Reads the VM-entry instruction-length field from the VMCS into
501 * the VMX transient structure.
502 *
503 * @returns VBox status code.
504 * @param pVCpu Pointer to the VMCPU.
505 * @param pVmxTransient Pointer to the VMX transient structure.
506 *
507 * @remarks No-long-jump zone!!!
508 */
509DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
510{
511 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
512 AssertRCReturn(rc, rc);
513 return VINF_SUCCESS;
514}
515
516
517/**
518 * Reads the VM-exit interruption-information field from the VMCS into the VMX
519 * transient structure.
520 *
521 * @returns VBox status code.
522 * @param pVCpu Pointer to the VMCPU.
523 * @param pVmxTransient Pointer to the VMX transient structure.
524 */
525DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
526{
527 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
528 {
529 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
530 AssertRCReturn(rc, rc);
531 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
532 }
533 return VINF_SUCCESS;
534}
535
536
537/**
538 * Reads the VM-exit interruption error code from the VMCS into the VMX
539 * transient structure.
540 *
541 * @returns VBox status code.
542 * @param pVCpu Pointer to the VMCPU.
543 * @param pVmxTransient Pointer to the VMX transient structure.
544 */
545DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
546{
547 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
548 {
549 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
550 AssertRCReturn(rc, rc);
551 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
552 }
553 return VINF_SUCCESS;
554}
555
556
557/**
558 * Reads the VM-exit instruction length field from the VMCS into the VMX
559 * transient structure.
560 *
561 * @returns VBox status code.
562 * @param pVCpu Pointer to the VMCPU.
563 * @param pVmxTransient Pointer to the VMX transient structure.
564 */
565DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
566{
567 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
568 {
569 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
570 AssertRCReturn(rc, rc);
571 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
572 }
573 return VINF_SUCCESS;
574}
575
576
577/**
578 * Reads the exit qualification from the VMCS into the VMX transient structure.
579 *
580 * @returns VBox status code.
581 * @param pVCpu Pointer to the VMCPU.
582 * @param pVmxTransient Pointer to the VMX transient structure.
583 */
584DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
585{
586 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
587 {
588 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
589 AssertRCReturn(rc, rc);
590 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
591 }
592 return VINF_SUCCESS;
593}
594
595
596/**
597 * Reads the IDT-vectoring information field from the VMCS into the VMX
598 * transient structure.
599 *
600 * @returns VBox status code.
601 * @param pVmxTransient Pointer to the VMX transient structure.
602 *
603 * @remarks No-long-jump zone!!!
604 */
605DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
606{
607 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
608 {
609 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
610 AssertRCReturn(rc, rc);
611 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
612 }
613 return VINF_SUCCESS;
614}
615
616
617/**
618 * Reads the IDT-vectoring error code from the VMCS into the VMX
619 * transient structure.
620 *
621 * @returns VBox status code.
622 * @param pVmxTransient Pointer to the VMX transient structure.
623 */
624DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
625{
626 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
627 {
628 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
629 AssertRCReturn(rc, rc);
630 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
631 }
632 return VINF_SUCCESS;
633}
634
635
636/**
637 * Enters VMX root mode operation on the current CPU.
638 *
639 * @returns VBox status code.
640 * @param pVM Pointer to the VM (optional, can be NULL, after
641 * a resume).
642 * @param HCPhysCpuPage Physical address of the VMXON region.
643 * @param pvCpuPage Pointer to the VMXON region.
644 */
645static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
646{
647 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
648 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
649 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
650
651 if (pVM)
652 {
653 /* Write the VMCS revision dword to the VMXON region. */
654 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
655 }
656
657 /* Enable the VMX bit in CR4 if necessary. */
658 RTCCUINTREG uCr4 = ASMGetCR4();
659 if (!(uCr4 & X86_CR4_VMXE))
660 ASMSetCR4(uCr4 | X86_CR4_VMXE);
661
662 /* Enter VMX root mode. */
663 int rc = VMXEnable(HCPhysCpuPage);
664 if (RT_FAILURE(rc))
665 ASMSetCR4(uCr4);
666
667 return rc;
668}
669
670
671/**
672 * Exits VMX root mode operation on the current CPU.
673 *
674 * @returns VBox status code.
675 */
676static int hmR0VmxLeaveRootMode(void)
677{
678 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
679
680 /* If we're for some reason not in VMX root mode, then don't leave it. */
681 if (ASMGetCR4() & X86_CR4_VMXE)
682 {
683 /* Exit VMX root mode and clear the VMX bit in CR4 */
684 VMXDisable();
685 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
686 return VINF_SUCCESS;
687 }
688
689 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
690}
691
692
693/**
694 * Allocates and maps one physically contiguous page. The allocated page is
695 * zero'd out. (Used by various VT-x structures).
696 *
697 * @returns IPRT status code.
698 * @param pMemObj Pointer to the ring-0 memory object.
699 * @param ppVirt Where to store the virtual address of the
700 * allocation.
701 * @param pHCPhys Where to store the physical address of the
702 * allocation.
703 */
704DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
705{
706 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
707 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
708 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
709
710 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
711 if (RT_FAILURE(rc))
712 return rc;
713 *ppVirt = RTR0MemObjAddress(*pMemObj);
714 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
715 ASMMemZero32(*ppVirt, PAGE_SIZE);
716 return VINF_SUCCESS;
717}
718
719
720/**
721 * Frees and unmaps an allocated physical page.
722 *
723 * @param pMemObj Pointer to the ring-0 memory object.
724 * @param ppVirt Where the virtual address of the allocation is
725 * stored; re-initialized to 0.
726 * @param pHCPhys Where the physical address of the allocation is
727 * stored; re-initialized to 0.
728 */
729DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
730{
731 AssertPtr(pMemObj);
732 AssertPtr(ppVirt);
733 AssertPtr(pHCPhys);
734 if (*pMemObj != NIL_RTR0MEMOBJ)
735 {
736 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
737 AssertRC(rc);
738 *pMemObj = NIL_RTR0MEMOBJ;
739 *ppVirt = 0;
740 *pHCPhys = 0;
741 }
742}
743
744
745/**
746 * Worker function to free VT-x related structures.
747 *
748 * @returns IPRT status code.
749 * @param pVM Pointer to the VM.
750 */
751static void hmR0VmxStructsFree(PVM pVM)
752{
753 for (VMCPUID i = 0; i < pVM->cCpus; i++)
754 {
755 PVMCPU pVCpu = &pVM->aCpus[i];
756 AssertPtr(pVCpu);
757
758#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
759 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
760 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
761#endif
762
763 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
764 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
765
766 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
767 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
768 }
769
770 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
771#ifdef VBOX_WITH_CRASHDUMP_MAGIC
772 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
773#endif
774}
775
776
777/**
778 * Worker function to allocate VT-x related VM structures.
779 *
780 * @returns IPRT status code.
781 * @param pVM Pointer to the VM.
782 */
783static int hmR0VmxStructsAlloc(PVM pVM)
784{
785 /*
786 * Initialize members up-front so we can cleanup properly on allocation failure.
787 */
788#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
789 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
790 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
791 pVM->hm.s.vmx.HCPhys##a_Name = 0;
792
793#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
794 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
795 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
796 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
797
798#ifdef VBOX_WITH_CRASHDUMP_MAGIC
799 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
800#endif
801 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
802
803 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
804 for (VMCPUID i = 0; i < pVM->cCpus; i++)
805 {
806 PVMCPU pVCpu = &pVM->aCpus[i];
807 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
808 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
809 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
810#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
811 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
812 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
813#endif
814 }
815#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
816#undef VMXLOCAL_INIT_VM_MEMOBJ
817
818 /*
819 * Allocate all the VT-x structures.
820 */
821 int rc = VINF_SUCCESS;
822#ifdef VBOX_WITH_CRASHDUMP_MAGIC
823 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
824 if (RT_FAILURE(rc))
825 goto cleanup;
826 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
827 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
828#endif
829
830 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
831 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
832 {
833 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
834 &pVM->hm.s.vmx.HCPhysApicAccess);
835 if (RT_FAILURE(rc))
836 goto cleanup;
837 }
838
839 /*
840 * Initialize per-VCPU VT-x structures.
841 */
842 for (VMCPUID i = 0; i < pVM->cCpus; i++)
843 {
844 PVMCPU pVCpu = &pVM->aCpus[i];
845 AssertPtr(pVCpu);
846
847 /* Allocate the VM control structure (VMCS). */
848 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
849 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
850 if (RT_FAILURE(rc))
851 goto cleanup;
852
853 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
854 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
855 {
856 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
857 &pVCpu->hm.s.vmx.HCPhysVirtApic);
858 if (RT_FAILURE(rc))
859 goto cleanup;
860 }
861
862 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
863 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
864 {
865 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
866 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
867 if (RT_FAILURE(rc))
868 goto cleanup;
869 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
870 }
871
872#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
873 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
874 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
875 if (RT_FAILURE(rc))
876 goto cleanup;
877
878 /* Allocate the VM-exit MSR-load page for the host MSRs. */
879 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
880 if (RT_FAILURE(rc))
881 goto cleanup;
882#endif
883 }
884
885 return VINF_SUCCESS;
886
887cleanup:
888 hmR0VmxStructsFree(pVM);
889 return rc;
890}
891
892
893/**
894 * Does global VT-x initialization (called during module initialization).
895 *
896 * @returns VBox status code.
897 */
898VMMR0DECL(int) VMXR0GlobalInit(void)
899{
900#ifdef HMVMX_USE_FUNCTION_TABLE
901 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
902# ifdef VBOX_STRICT
903 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
904 Assert(g_apfnVMExitHandlers[i]);
905# endif
906#endif
907 return VINF_SUCCESS;
908}
909
910
911/**
912 * Does global VT-x termination (called during module termination).
913 */
914VMMR0DECL(void) VMXR0GlobalTerm()
915{
916 /* Nothing to do currently. */
917}
918
919
920/**
921 * Sets up and activates VT-x on the current CPU.
922 *
923 * @returns VBox status code.
924 * @param pCpu Pointer to the global CPU info struct.
925 * @param pVM Pointer to the VM (can be NULL after a host resume
926 * operation).
927 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
928 * fEnabledByHost is true).
929 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
930 * @a fEnabledByHost is true).
931 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
932 * enable VT-x/AMD-V on the host.
933 */
934VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
935{
936 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
937 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
938
939 if (!fEnabledByHost)
940 {
941 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
942 if (RT_FAILURE(rc))
943 return rc;
944 }
945
946 /*
947 * Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
948 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
949 * each time while reusing a VPID after hitting the MaxASID limit once.
950 */
951 if ( pVM
952 && pVM->hm.s.vmx.fVpid
953 && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
954 {
955 hmR0VmxFlushVpid(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
956 pCpu->fFlushAsidBeforeUse = false;
957 }
958 else
959 pCpu->fFlushAsidBeforeUse = true;
960
961 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
962 ++pCpu->cTlbFlushes;
963
964 return VINF_SUCCESS;
965}
966
967
968/**
969 * Deactivates VT-x on the current CPU.
970 *
971 * @returns VBox status code.
972 * @param pCpu Pointer to the global CPU info struct.
973 * @param pvCpuPage Pointer to the VMXON region.
974 * @param HCPhysCpuPage Physical address of the VMXON region.
975 */
976VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
977{
978 NOREF(pCpu);
979 NOREF(pvCpuPage);
980 NOREF(HCPhysCpuPage);
981
982 hmR0VmxLeaveRootMode();
983 return VINF_SUCCESS;
984}
985
986
987/**
988 * Sets the permission bits for the specified MSR in the MSR bitmap.
989 *
990 * @param pVCpu Pointer to the VMCPU.
991 * @param uMsr The MSR value.
992 * @param enmRead Whether reading this MSR causes a VM-exit.
993 * @param enmWrite Whether writing this MSR causes a VM-exit.
994 */
995static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
996{
997 int32_t iBit;
998 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
999
1000 /*
1001 * Layout:
1002 * 0x000 - 0x3ff - Low MSR read bits
1003 * 0x400 - 0x7ff - High MSR read bits
1004 * 0x800 - 0xbff - Low MSR write bits
1005 * 0xc00 - 0xfff - High MSR write bits
1006 */
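 /*
  * Worked example (illustrative, not part of the original file), following the layout above:
  * for MSR_K6_EFER (0xC0000080) we get iBit = 0x80 and pbMsrBitmap advances by 0x400, so the
  * read-permission bit lands at bitmap offset 0x400 (high MSR read bits) and the matching
  * write-permission bit at 0x400 + 0x800 = 0xC00 (high MSR write bits).
  */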
1007 if (uMsr <= 0x00001FFF)
1008 iBit = uMsr;
1009 else if ( uMsr >= 0xC0000000
1010 && uMsr <= 0xC0001FFF)
1011 {
1012 iBit = (uMsr - 0xC0000000);
1013 pbMsrBitmap += 0x400;
1014 }
1015 else
1016 {
1017 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1018 return;
1019 }
1020
1021 Assert(iBit <= 0x1fff);
1022 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1023 ASMBitSet(pbMsrBitmap, iBit);
1024 else
1025 ASMBitClear(pbMsrBitmap, iBit);
1026
1027 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1028 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1029 else
1030 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1031}
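/*
 * Illustrative usage, not part of the original file: to let the guest read and write an MSR
 * without causing VM-exits one would call, for example,
 *
 *     hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ,
 *                             VMXMSREXIT_PASSTHRU_WRITE);
 *
 * The particular MSR chosen here is only an example.
 */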
1032
1033
1034/**
1035 * Flushes the TLB using EPT.
1036 *
1037 *
1038 * @param pVM Pointer to the VM.
1039 * @param pVCpu Pointer to the VMCPU.
1040 * @param enmFlush Type of flush.
1041 */
1042static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
1043{
1044 AssertPtr(pVM);
1045 Assert(pVM->hm.s.fNestedPaging);
1046
1047 LogFlowFunc(("pVM=%p pVCpu=%p enmFlush=%d\n", pVM, pVCpu, enmFlush));
1048
1049 uint64_t descriptor[2];
1050 descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1051 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1052
1053 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1054 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu->hm.s.vmx.HCPhysEPTP, rc));
1055 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1056}
1057
1058
1059/**
1060 * Flushes the TLB using VPID.
1061 *
1062 *
1063 * @param pVM Pointer to the VM.
1064 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1065 * enmFlush).
1066 * @param enmFlush Type of flush.
1067 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1068 * on @a enmFlush).
1069 */
1070static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1071{
1072 AssertPtr(pVM);
1073 Assert(pVM->hm.s.vmx.fVpid);
1074
1075 uint64_t descriptor[2];
1076 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1077 {
1078 descriptor[0] = 0;
1079 descriptor[1] = 0;
1080 }
1081 else
1082 {
1083 AssertPtr(pVCpu);
1084 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1085 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1086 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1087 descriptor[1] = GCPtr;
1088 }
1089
1090 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1091 AssertMsg(rc == VINF_SUCCESS,
1092 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1093 if ( RT_SUCCESS(rc)
1094 && pVCpu)
1095 {
1096 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1097 }
1098}
1099
1100
1101/**
1102 * Invalidates a guest page by guest virtual address. Only relevant for
1103 * EPT/VPID, otherwise there is nothing really to invalidate.
1104 *
1105 * @returns VBox status code.
1106 * @param pVM Pointer to the VM.
1107 * @param pVCpu Pointer to the VMCPU.
1108 * @param GCVirt Guest virtual address of the page to invalidate.
1109 */
1110VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1111{
1112 AssertPtr(pVM);
1113 AssertPtr(pVCpu);
1114 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1115
1116 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1117 if (!fFlushPending)
1118 {
1119 /*
1120 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1121 * See @bugref{6043} and @bugref{6177}.
1122 *
1123 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1124 * function may be called in a loop with individual addresses.
1125 */
1126 if (pVM->hm.s.vmx.fVpid)
1127 {
1128 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1129 {
1130 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1131 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1132 }
1133 else
1134 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1135 }
1136 else if (pVM->hm.s.fNestedPaging)
1137 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1138 }
1139
1140 return VINF_SUCCESS;
1141}
1142
1143
1144/**
1145 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1146 * otherwise there is nothing really to invalidate.
1147 *
1148 * @returns VBox status code.
1149 * @param pVM Pointer to the VM.
1150 * @param pVCpu Pointer to the VMCPU.
1151 * @param GCPhys Guest physical address of the page to invalidate.
1152 */
1153VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1154{
1155 LogFlowFunc(("%RGp\n", GCPhys));
1156
1157 /*
1158 * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes
1159 * by EPT not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
1160 * This function might be called in a loop.
1161 */
1162 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1163 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1164 return VINF_SUCCESS;
1165}
1166
1167
1168/**
1169 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1170 * case where neither EPT nor VPID is supported by the CPU.
1171 *
1172 * @param pVM Pointer to the VM.
1173 * @param pVCpu Pointer to the VMCPU.
1174 *
1175 * @remarks Called with interrupts disabled.
1176 */
1177static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1178{
1179 NOREF(pVM);
1180 AssertPtr(pVCpu);
1181 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1182 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1183
1184 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1185 AssertPtr(pCpu);
1186
1187 pVCpu->hm.s.TlbShootdown.cPages = 0;
1188 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1189 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1190 pVCpu->hm.s.fForceTLBFlush = false;
1191 return;
1192}
1193
1194
1195/**
1196 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1197 *
1198 * @param pVM Pointer to the VM.
1199 * @param pVCpu Pointer to the VMCPU.
1200 * @remarks All references to "ASID" in this function pertain to "VPID" in
1201 * Intel's nomenclature. The reason is to avoid confusion in compare
1202 * statements since the host-CPU copies are named "ASID".
1203 *
1204 * @remarks Called with interrupts disabled.
1205 */
1206static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1207{
1208 AssertPtr(pVM);
1209 AssertPtr(pVCpu);
1210 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1211 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1212 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1213
1214 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1215 AssertPtr(pCpu);
1216
1217 /*
1218 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1219 * This can happen both for start & resume due to long jumps back to ring-3.
1220 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1221 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1222 */
1223 bool fNewASID = false;
1224 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1225 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1226 {
1227 pVCpu->hm.s.fForceTLBFlush = true;
1228 fNewASID = true;
1229 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1230 }
1231
1232 /*
1233 * Check for explicit TLB shootdowns.
1234 */
1235 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1236 {
1237 pVCpu->hm.s.fForceTLBFlush = true;
1238 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1239 }
1240
1241 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1242 if (pVCpu->hm.s.fForceTLBFlush)
1243 {
1244 if (fNewASID)
1245 {
1246 ++pCpu->uCurrentAsid;
1247 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1248 {
1249 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1250 pCpu->cTlbFlushes++;
1251 pCpu->fFlushAsidBeforeUse = true;
1252 }
1253
1254 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1255 if (pCpu->fFlushAsidBeforeUse)
1256 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1257 }
1258 else
1259 {
1260 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1261 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
1262 else
1263 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1264 }
1265
1266 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1267 pVCpu->hm.s.fForceTLBFlush = false;
1268 }
1269 else
1270 {
1271 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1272 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1273 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1274 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1275
1276 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1277 * not be executed. See hmQueueInvlPage() where it is commented
1278 * out. Support individual entry flushing someday. */
1279 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1280 {
1281 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1282
1283 /*
1284 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1285 * as supported by the CPU.
1286 */
1287 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1288 {
1289 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1290 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1291 }
1292 else
1293 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1294 }
1295 else
1296 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1297 }
1298 pVCpu->hm.s.TlbShootdown.cPages = 0;
1299 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1300
1301 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1302 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1303 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1304 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1305 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1306 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1307
1308 /* Update VMCS with the VPID. */
1309 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1310 AssertRC(rc);
1311}
1312
1313
1314/**
1315 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1316 *
1317 *
1318 * @param pVM Pointer to the VM.
1319 * @param pVCpu Pointer to the VMCPU.
1320 *
1321 * @remarks Called with interrupts disabled.
1322 */
1323static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1324{
1325 AssertPtr(pVM);
1326 AssertPtr(pVCpu);
1327 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1328 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1329
1330 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1331 AssertPtr(pCpu);
1332
1333 /*
1334 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1335 * This can happen both for start & resume due to long jumps back to ring-3.
1336 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1337 */
1338 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1339 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1340 {
1341 pVCpu->hm.s.fForceTLBFlush = true;
1342 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1343 }
1344
1345 /* Check for explicit TLB shootdown flushes. */
1346 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1347 {
1348 pVCpu->hm.s.fForceTLBFlush = true;
1349 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1350 }
1351
1352 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1353 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1354
1355 if (pVCpu->hm.s.fForceTLBFlush)
1356 {
1357 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1358 pVCpu->hm.s.fForceTLBFlush = false;
1359 }
1360 else
1361 {
1362 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1363 * not be executed. See hmQueueInvlPage() where it is commented
1364 * out. Support individual entry flushing someday. */
1365 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1366 {
1367 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1368 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1369 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1370 }
1371 else
1372 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1373 }
1374
1375 pVCpu->hm.s.TlbShootdown.cPages = 0;
1376 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1377}
1378
1379
1380/**
1381 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1382 *
1383 *
1384 * @param pVM Pointer to the VM.
1385 * @param pVCpu Pointer to the VMCPU.
1386 *
1387 * @remarks Called with interrupts disabled.
1388 */
1389static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1390{
1391 AssertPtr(pVM);
1392 AssertPtr(pVCpu);
1393 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1394 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1395
1396 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
1397
1398 /*
1399 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1400 * This can happen both for start & resume due to long jumps back to ring-3.
1401 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1402 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1403 */
1404 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1405 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1406 {
1407 pVCpu->hm.s.fForceTLBFlush = true;
1408 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1409 }
1410
1411 /* Check for explicit TLB shootdown flushes. */
1412 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1413 {
1414 /*
1415 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1416 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1417 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
1418 */
1419 pVCpu->hm.s.fForceTLBFlush = true;
1420 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1421 }
1422
1423 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1424 if (pVCpu->hm.s.fForceTLBFlush)
1425 {
1426 ++pCpu->uCurrentAsid;
1427 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1428 {
1429 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
1430 pCpu->fFlushAsidBeforeUse = true;
1431 pCpu->cTlbFlushes++;
1432 }
1433
1434 pVCpu->hm.s.fForceTLBFlush = false;
1435 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1436 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1437 if (pCpu->fFlushAsidBeforeUse)
1438 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1439 }
1440 else
1441 {
1442 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1443 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1444 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1445 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1446
1447 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1448 * not be executed. See hmQueueInvlPage() where it is commented
1449 * out. Support individual entry flushing someday. */
1450 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1451 {
1452 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
1453 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1454 {
1455 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1456 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1457 }
1458 else
1459 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1460 }
1461 else
1462 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1463 }
1464
1465 pVCpu->hm.s.TlbShootdown.cPages = 0;
1466 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1467
1468 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1469 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1470 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1471 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1472 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1473 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1474
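    /* Associate the current ASID with this VCPU in the VMCS; VT-x tags the guest's TLB entries with this VPID. */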
1475 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1476 AssertRC(rc);
1477}
1478
1479
1480/**
1481 * Flushes the guest TLB entry based on CPU capabilities.
1482 *
1483 * @param pVCpu Pointer to the VMCPU.
1484 */
1485DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1486{
1487 PVM pVM = pVCpu->CTX_SUFF(pVM);
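    /* The flush handler was chosen once in hmR0VmxSetupTaggedTlb() based on the EPT/VPID capabilities. */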
1488 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1489 {
1490 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1491 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1492 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1493 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1494 default:
1495 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1496 break;
1497 }
1498}
1499
1500
1501/**
1502 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1503 * TLB entries from the host TLB before VM-entry.
1504 *
1505 * @returns VBox status code.
1506 * @param pVM Pointer to the VM.
1507 */
1508static int hmR0VmxSetupTaggedTlb(PVM pVM)
1509{
1510 /*
1511 * Determine optimal flush type for nested paging.
1512     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
1513 * guest execution (see hmR3InitFinalizeR0()).
1514 */
1515 if (pVM->hm.s.fNestedPaging)
1516 {
1517 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1518 {
1519 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1520 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1521 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1522 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1523 else
1524 {
1525            /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
1526 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1527 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1528 }
1529
1530 /* Make sure the write-back cacheable memory type for EPT is supported. */
1531 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1532 {
1533 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1534 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1535 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1536 }
1537 }
1538 else
1539 {
1540 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1541 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1542 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1543 }
1544 }
1545
1546 /*
1547 * Determine optimal flush type for VPID.
1548 */
1549 if (pVM->hm.s.vmx.fVpid)
1550 {
1551 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1552 {
1553 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1554 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1555 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1556 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1557 else
1558 {
1559                /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
1560 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1561 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1562 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1563 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1564 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1565 pVM->hm.s.vmx.fVpid = false;
1566 }
1567 }
1568 else
1569 {
1570 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1571            Log(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
1572 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1573 pVM->hm.s.vmx.fVpid = false;
1574 }
1575 }
1576
1577 /*
1578 * Setup the handler for flushing tagged-TLBs.
1579 */
1580 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1581 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1582 else if (pVM->hm.s.fNestedPaging)
1583 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1584 else if (pVM->hm.s.vmx.fVpid)
1585 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1586 else
1587 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1588 return VINF_SUCCESS;
1589}
1590
1591
1592/**
1593 * Sets up pin-based VM-execution controls in the VMCS.
1594 *
1595 * @returns VBox status code.
1596 * @param pVM Pointer to the VM.
1597 * @param pVCpu Pointer to the VMCPU.
1598 */
1599static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1600{
1601 AssertPtr(pVM);
1602 AssertPtr(pVCpu);
1603
1604 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1605 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1606
1607    val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT      /* External interrupts cause a VM-exit. */
1608         | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;         /* Non-maskable interrupts cause a VM-exit. */
1609 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI));
1610
1611 /* Enable the VMX preemption timer. */
1612 if (pVM->hm.s.vmx.fUsePreemptTimer)
1613 {
1614 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER);
1615 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
1616 }
1617
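    /* Refuse combinations where a bit we need is not in the allowed-1 settings; better to fail here than at VM-entry. */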
1618 if ((val & zap) != val)
1619 {
1620 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1621 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1622 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1623 }
1624
1625 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);
1626 AssertRCReturn(rc, rc);
1627
1628 /* Update VCPU with the currently set pin-based VM-execution controls. */
1629 pVCpu->hm.s.vmx.u32PinCtls = val;
1630 return rc;
1631}
1632
1633
1634/**
1635 * Sets up processor-based VM-execution controls in the VMCS.
1636 *
1637 * @returns VBox status code.
1638 * @param pVM Pointer to the VM.
1639 * @param pVCpu Pointer to the VMCPU.
1640 */
1641static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1642{
1643 AssertPtr(pVM);
1644 AssertPtr(pVCpu);
1645
1646 int rc = VERR_INTERNAL_ERROR_5;
1647 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1648 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1649
1650 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT /* HLT causes a VM-exit. */
1651 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1652 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1653 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1654 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1655 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1656 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1657
1658    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT later; check that it isn't forced to be always set or always cleared. */
1659 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
1660 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT))
1661 {
1662 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT combo!"));
1663 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1664 }
1665
1666 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1667 if (!pVM->hm.s.fNestedPaging)
1668 {
1669 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1670 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
1671 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
1672 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
1673 }
1674
1675 /* Use TPR shadowing if supported by the CPU. */
1676 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
1677 {
1678 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1679 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1680 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1681 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1682 AssertRCReturn(rc, rc);
1683
1684 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1685        /* CR8 writes cause a VM-exit based on the TPR threshold. */
1686 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT));
1687 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT));
1688 }
1689 else
1690 {
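        /* No virtual-APIC page available; trap all CR8 accesses so the TPR can be emulated. */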
1691        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT  /* CR8 reads cause a VM-exit. */
1692               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
1693 }
1694
1695 /* Use MSR-bitmaps if supported by the CPU. */
1696 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
1697 {
1698 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
1699
1700 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1701 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1702 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1703 AssertRCReturn(rc, rc);
1704
1705 /*
1706 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1707 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1708 */
1709 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1710 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1711 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1712 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1713 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1714 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1715 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1716 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1717 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1718 }
1719
1720 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1721 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1722 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1723
1724 if ((val & zap) != val)
1725 {
1726 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1727 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1728 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1729 }
1730
1731 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);
1732 AssertRCReturn(rc, rc);
1733
1734 /* Update VCPU with the currently set processor-based VM-execution controls. */
1735 pVCpu->hm.s.vmx.u32ProcCtls = val;
1736
1737 /*
1738 * Secondary processor-based VM-execution controls.
1739 */
1740 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1741 {
1742 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1743 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1744
1745 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1746 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1747
1748 if (pVM->hm.s.fNestedPaging)
1749 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1750 else
1751 {
1752 /*
1753 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1754 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT when INVPCID is executed by the guest.
1755 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1756 */
1757 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1758 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1759 }
1760
1761 if (pVM->hm.s.vmx.fVpid)
1762 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1763
1764 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1765 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1766
1767 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1768        /** @todo VIRT_X2APIC support: it's mutually exclusive with this, so it must
1769         *        be done dynamically. */
1770 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1771 {
1772 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1773 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1774 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1775 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1776 AssertRCReturn(rc, rc);
1777 }
1778
1779 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1780 {
1781 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1782 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
1783 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1784 }
1785
1786 if ((val & zap) != val)
1787 {
1788 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1789 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1790 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1791 }
1792
1793 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);
1794 AssertRCReturn(rc, rc);
1795
1796 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1797 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1798 }
1799
1800 return VINF_SUCCESS;
1801}
1802
1803
1804/**
1805 * Sets up miscellaneous (everything other than Pin & Processor-based
1806 * VM-execution) control fields in the VMCS.
1807 *
1808 * @returns VBox status code.
1809 * @param pVM Pointer to the VM.
1810 * @param pVCpu Pointer to the VMCPU.
1811 */
1812static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1813{
1814 AssertPtr(pVM);
1815 AssertPtr(pVCpu);
1816
1817 int rc = VERR_GENERAL_FAILURE;
1818
1819    /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs()). */
1820 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
1821
1822 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
1823
1824 /*
1825     * Set MASK & MATCH to 0. VT-x checks whether (GuestPFErrCode & MASK) == MATCH. If equal (and with both set to 0
1826     * it always is), a #PF causes a VM-exit when the X86_XCPT_PF bit in the exception bitmap is set and no VM-exit
1827     * when it is clear. We thus control #PF interception purely through the exception bitmap.
1828 */
1829 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
1830 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
1831
1832 /** @todo Explore possibility of using IO-bitmaps. */
1833 /* All IO & IOIO instructions cause VM-exits. */
1834 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
1835 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
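    /* Note: the "use I/O bitmaps" control is not enabled (see the @todo above), so these addresses are not
       consulted; the unconditional I/O exiting control set in hmR0VmxSetupProcCtls() forces the exits. */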
1836
1837#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1838 /* Setup MSR autoloading/autostoring. */
1839 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1840 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1841 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1842 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1843 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
1844 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
1845
1846 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1847 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1848 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1849 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
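    /* The counts start out as zero; the VM-exit MSR-load count is raised in hmR0VmxSaveHostMsrs() once the
       host MSR area has been populated. */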
1850#else
1851 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
1852 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
1853 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
1854#endif
1855
1856 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1857 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1858
1859 /* Setup debug controls */
1860 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1861 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1862 AssertRCReturn(rc, rc);
1863 return rc;
1864}
1865
1866
1867/**
1868 * Sets up the initial exception bitmap in the VMCS based on static conditions
1869 * (i.e. conditions that cannot ever change at runtime).
1870 *
1871 * @returns VBox status code.
1872 * @param pVM Pointer to the VM.
1873 * @param pVCpu Pointer to the VMCPU.
1874 */
1875static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1876{
1877 AssertPtr(pVM);
1878 AssertPtr(pVCpu);
1879
1880 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
1881
1882 uint32_t u32XcptBitmap = 0;
1883
1884 /* Without nested paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
1885 if (!pVM->hm.s.fNestedPaging)
1886 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
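    /* #NM, #MF and the real-on-v86 exception set are added or removed dynamically in hmR0VmxLoadGuestControlRegs(). */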
1887
1888 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
1889 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1890 AssertRCReturn(rc, rc);
1891 return rc;
1892}
1893
1894
1895/**
1896 * Sets up the initial guest-state mask. The guest-state mask is consulted
1897 * before reading guest-state fields from the VMCS as VMREADs can be expensive
1898 * in the nested-virtualization case (each VMREAD would cause a VM-exit).
1899 *
1900 * @param pVCpu Pointer to the VMCPU.
1901 */
1902static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
1903{
1904 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
1905 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
1906 return VINF_SUCCESS;
1907}
1908
1909
1910/**
1911 * Does per-VM VT-x initialization.
1912 *
1913 * @returns VBox status code.
1914 * @param pVM Pointer to the VM.
1915 */
1916VMMR0DECL(int) VMXR0InitVM(PVM pVM)
1917{
1918 LogFlowFunc(("pVM=%p\n", pVM));
1919
1920 int rc = hmR0VmxStructsAlloc(pVM);
1921 if (RT_FAILURE(rc))
1922 {
1923 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
1924 return rc;
1925 }
1926
1927 return VINF_SUCCESS;
1928}
1929
1930
1931/**
1932 * Does per-VM VT-x termination.
1933 *
1934 * @returns VBox status code.
1935 * @param pVM Pointer to the VM.
1936 */
1937VMMR0DECL(int) VMXR0TermVM(PVM pVM)
1938{
1939 LogFlowFunc(("pVM=%p\n", pVM));
1940
1941#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1942 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
1943 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
1944#endif
1945 hmR0VmxStructsFree(pVM);
1946 return VINF_SUCCESS;
1947}
1948
1949
1950/**
1951 * Sets up the VM for execution under VT-x.
1952 * This function is only called once per-VM during initialization.
1953 *
1954 * @returns VBox status code.
1955 * @param pVM Pointer to the VM.
1956 */
1957VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
1958{
1959 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
1960 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1961
1962 LogFlowFunc(("pVM=%p\n", pVM));
1963
1964 /*
1965 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
1966 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
1967 */
1968 /* -XXX- change hmR3InitFinalizeR0() to fail if pRealModeTSS alloc fails. */
1969 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
1970 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
1971 || !pVM->hm.s.vmx.pRealModeTSS))
1972 {
1973 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
1974 return VERR_INTERNAL_ERROR;
1975 }
1976
1977 /* Initialize these always, see hmR3InitFinalizeR0().*/
1978 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
1979 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
1980
1981 /* Setup the tagged-TLB flush handlers. */
1982 int rc = hmR0VmxSetupTaggedTlb(pVM);
1983 if (RT_FAILURE(rc))
1984 {
1985 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
1986 return rc;
1987 }
1988
1989 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1990 {
1991 PVMCPU pVCpu = &pVM->aCpus[i];
1992 AssertPtr(pVCpu);
1993 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
1994
1995 /* Set revision dword at the beginning of the VMCS structure. */
1996 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
1997
1998 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
1999 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2000 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2001 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2002
2003 /* Load this VMCS as the current VMCS. */
2004 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2005 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVMCS failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2006 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2007
2008 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2009 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2010 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2011
2012 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2013 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2014 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2015
2016 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2017 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2018 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2019
2020 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2021 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2022 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2023
2024 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2025 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2026 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2027
2028#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2029 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2030 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2031 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2032#endif
2033
2034 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2035 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
2036 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVMCS(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2037 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2038
2039 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2040 }
2041
2042 return VINF_SUCCESS;
2043}
2044
2045
2046/**
2047 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2048 * the VMCS.
2049 *
2050 * @returns VBox status code.
2051 * @param pVM Pointer to the VM.
2052 * @param pVCpu Pointer to the VMCPU.
2053 */
2054DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2055{
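    /* The CPU reloads CR0, CR3 and CR4 from these host-state fields on every VM-exit, so what we write here is
       what the host runs with after the world switch back. */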
2056 RTCCUINTREG uReg = ASMGetCR0();
2057 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2058
2059#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2060 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2061 if (HMVMX_IS_64BIT_HOST_MODE())
2062 {
2063 uint64_t uRegCR3 = hmR0Get64bitCR3();
2064 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2065 }
2066 else
2067#endif
2068 {
2069 uReg = ASMGetCR3();
2070 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2071 }
2072
2073 uReg = ASMGetCR4();
2074 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2075 AssertRCReturn(rc, rc);
2076 return rc;
2077}
2078
2079
2080/**
2081 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2082 * the host-state area in the VMCS.
2083 *
2084 * @returns VBox status code.
2085 * @param pVM Pointer to the VM.
2086 * @param pVCpu Pointer to the VMCPU.
2087 */
2088DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2089{
2090 int rc = VERR_INTERNAL_ERROR_5;
2091 RTSEL uSelCS = 0;
2092 RTSEL uSelSS = 0;
2093 RTSEL uSelDS = 0;
2094 RTSEL uSelES = 0;
2095 RTSEL uSelFS = 0;
2096 RTSEL uSelGS = 0;
2097 RTSEL uSelTR = 0;
2098
2099 /*
2100 * Host Selector registers.
2101 */
2102#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2103 if (HMVMX_IS_64BIT_HOST_MODE())
2104 {
2105 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2106 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2107 }
2108 else
2109 {
2110 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2111 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2112 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2113 }
2114#else
2115 uSelCS = ASMGetCS();
2116 uSelSS = ASMGetSS();
2117#endif
2118
2119 /* Note: VT-x is picky about the RPL of the selectors here; we'll restore them manually. */
2120 uSelTR = ASMGetTR();
2121
2122 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2123    /** @todo Verify if we have any platform that actually runs with DS or ES with
2124 * RPL != 0 in kernel space. */
2125 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2126 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2127 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2128 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2129 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2130 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2131 Assert(uSelCS != 0);
2132 Assert(uSelTR != 0);
2133
2134 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2135#if 0
2136 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE))
2137 Assert(uSelSS != 0);
2138#endif
2139
2140 /* Write these host selector fields into the host-state area in the VMCS. */
2141 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);
2142 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);
2143 /* Avoid the VMWRITEs as we set the following segments to 0 and the VMCS fields are already 0 (since g_HvmR0 is static) */
2144#if 0
2145 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);
2146 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);
2147 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS);
2148 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS);
2149#endif
2150 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR);
2151 AssertRCReturn(rc, rc);
2152
2153 /*
2154 * Host GDTR and IDTR.
2155 */
2156    /** @todo Despite VT-x -not- restoring the limits on GDTR and IDTR, it should
2157     *        be safe to -not- save and restore GDTR and IDTR in the assembly
2158     *        code, do it just here, and not care if the limits are zapped on
2159     *        VM-exit. */
2160 RTGDTR Gdtr;
2161 RT_ZERO(Gdtr);
2162#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2163 if (HMVMX_IS_64BIT_HOST_MODE())
2164 {
2165 X86XDTR64 Gdtr64;
2166 X86XDTR64 Idtr64;
2167 hmR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2168 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);
2169 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);
2170 Gdtr.cbGdt = Gdtr64.cb;
2171 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2172 }
2173 else
2174#endif
2175 {
2176 RTIDTR Idtr;
2177 ASMGetGDTR(&Gdtr);
2178 ASMGetIDTR(&Idtr);
2179 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
2180 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
2181 }
2182 AssertRCReturn(rc, rc);
2183
2184 /*
2185 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
2186 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
2187 */
2188 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2189 {
2190 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit.TR=%RTsel Gdtr.cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2191 return VERR_VMX_INVALID_HOST_STATE;
2192 }
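    /* Look up the TR descriptor in the GDT to get its base; VT-x needs the base as a separate host-state field
       since it does not consult descriptor tables when restoring the host state on VM-exit. */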
2193
2194 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2195#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2196 if (HMVMX_IS_64BIT_HOST_MODE())
2197 {
2198 /* We need the 64-bit TR base for hybrid darwin. */
2199 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2200 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2201 }
2202 else
2203#endif
2204 {
2205 uintptr_t uTRBase;
2206#if HC_ARCH_BITS == 64
2207 uTRBase = X86DESC64_BASE(pDesc);
2208#else
2209 uTRBase = X86DESC_BASE(pDesc);
2210#endif
2211 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2212 }
2213 AssertRCReturn(rc, rc);
2214
2215 /*
2216 * Host FS base and GS base.
2217     * For 32-bit hosts the bases are handled by the assembly code, where we push/pop FS and GS, which
2218     * takes care of the bases. In 64-bit, the MSRs come into play.
2219 */
2220#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2221 if (HMVMX_IS_64BIT_HOST_MODE())
2222 {
2223 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2224 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2225 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_FS_BASE, u64FSBase);
2226 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_GS_BASE, u64GSBase);
2227 AssertRCReturn(rc, rc);
2228 }
2229#endif
2230 return rc;
2231}
2232
2233
2234/**
2235 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2236 * host-state area of the VMCS. These MSRs will be automatically restored on
2237 * the host after every successful VM-exit.
2238 *
2239 * @returns VBox status code.
2240 * @param pVM Pointer to the VM.
2241 * @param pVCpu Pointer to the VMCPU.
2242 */
2243DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2244{
2245 AssertPtr(pVCpu);
2246 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2247
2248 int rc = VINF_SUCCESS;
2249#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2250 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2251 uint32_t cHostMsrs = 0;
2252 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2253
2254 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2255 {
2256 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2257 pHostMsr->u32Reserved = 0;
2258# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2259 if (CPUMIsGuestInLongMode(pVCpu))
2260 {
2261 /* Must match the EFER value in our 64 bits switcher. */
2262 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2263 }
2264 else
2265# endif
2266 pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
2267 pHostMsr++; cHostMsrs++;
2268 }
2269
2270# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2271 if (HMVMX_IS_64BIT_HOST_MODE())
2272 {
2273 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2274 pHostMsr->u32Reserved = 0;
2275 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2276 pHostMsr++; cHostMsrs++;
2277 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2278 pHostMsr->u32Reserved = 0;
2279 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2280 pHostMsr++; cHostMsrs++;
2281 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2282 pHostMsr->u32Reserved = 0;
2283 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2284 pHostMsr++; cHostMsrs++;
2285 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2286 pHostMsr->u32Reserved = 0;
2287 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2288 pHostMsr++; cHostMsrs++;
2289 }
2290# endif
2291
2292 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
2293 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2294 {
2295 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2296 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2297 }
2298
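    /* Publish how many entries of the auto-load area the CPU must restore on each VM-exit. */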
2299 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2300#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2301
2302 /*
2303 * Host Sysenter MSRs.
2304 */
2305 rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2306# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2307 if (HMVMX_IS_64BIT_HOST_MODE())
2308 {
2309 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2310 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2311 }
2312 else
2313 {
2314 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2315 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2316 }
2317# elif HC_ARCH_BITS == 32
2318 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2319 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2320# else
2321 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2322 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2323# endif
2324 AssertRCReturn(rc, rc);
2325
2326 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2327 * hmR0VmxSetupExitCtls() !! */
2328 return rc;
2329}
2330
2331
2332/**
2333 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2334 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2335 * controls".
2336 *
2337 * @returns VBox status code.
2338 * @param pVCpu Pointer to the VMCPU.
2339 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2340 * out-of-sync. Make sure to update the required fields
2341 * before using them.
2342 *
2343 * @remarks No-long-jump zone!!!
2344 */
2345DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2346{
2347 int rc = VINF_SUCCESS;
2348 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2349 {
2350 PVM pVM = pVCpu->CTX_SUFF(pVM);
2351 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2352 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2353
2354        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
2355 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
2356
2357 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2358 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2359 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
2360 else
2361 Assert(!(val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST));
2362
2363 /*
2364 * The following should not be set (since we're not in SMM mode):
2365 * - VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM
2366 * - VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON
2367 */
2368
2369 /** @todo VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR,
2370 * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR,
2371 * VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR */
2372
2373 if ((val & zap) != val)
2374 {
2375 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2376 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2377 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2378 }
2379
2380 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);
2381 AssertRCReturn(rc, rc);
2382
2383        /* Update VCPU with the currently set VM-entry controls. */
2384 pVCpu->hm.s.vmx.u32EntryCtls = val;
2385 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2386 }
2387 return rc;
2388}
2389
2390
2391/**
2392 * Sets up the VM-exit controls in the VMCS.
2393 *
2394 * @returns VBox status code.
2395 * @param pVM Pointer to the VM.
2396 * @param pVCpu Pointer to the VMCPU.
2397 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2398 * out-of-sync. Make sure to update the required fields
2399 * before using them.
2400 *
2401 * @remarks requires EFER.
2402 */
2403DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2404{
2405 int rc = VINF_SUCCESS;
2406 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2407 {
2408 PVM pVM = pVCpu->CTX_SUFF(pVM);
2409 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2410 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2411
2412 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2413 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
2414
2415 /* Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. */
2416#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2417 if (HMVMX_IS_64BIT_HOST_MODE())
2418 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;
2419 else
2420 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
2421#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2422 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2423 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2424 else
2425 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
2426#endif
2427
2428 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2429 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT));
2430
2431 /** @todo VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_PERF_MSR,
2432 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR,
2433 * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR,
2434 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR,
2435 * VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR. */
2436
2437 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
2438 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER;
2439
2440 if ((val & zap) != val)
2441 {
2442            LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2443 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2444 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2445 }
2446
2447 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, val);
2448 AssertRCReturn(rc, rc);
2449
2450 /* Update VCPU with the currently set VM-exit controls. */
2451 pVCpu->hm.s.vmx.u32ExitCtls = val;
2452 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2453 }
2454 return rc;
2455}
2456
2457
2458/**
2459 * Loads the guest APIC and related state.
2460 *
2461 * @returns VBox status code.
2462 * @param pVM Pointer to the VM.
2463 * @param pVCpu Pointer to the VMCPU.
2464 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2465 * out-of-sync. Make sure to update the required fields
2466 * before using them.
2467 */
2468DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2469{
2470 int rc = VINF_SUCCESS;
2471 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2472 {
2473 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2474 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
2475 {
2476 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2477
2478 bool fPendingIntr = false;
2479 uint8_t u8GuestTpr = 0;
2480 rc = PDMApicGetTPR(pVCpu, &u8GuestTpr, &fPendingIntr);
2481 AssertRCReturn(rc, rc);
2482
2483 /*
2484 * If there are external interrupts pending but masked by the TPR value, apply the threshold so that if the guest
2485 * lowers the TPR, it would cause a VM-exit and we can deliver the interrupt.
2486 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2487 * the interrupt when we VM-exit for other reasons.
2488 */
2489 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8GuestTpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2490 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2491 uint32_t u32TprThreshold = fPendingIntr ? (u8GuestTpr >> 4) : 0;
2492 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2493
2494 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2495 AssertRCReturn(rc, rc);
2496
2497            /* 32-bit guests use the LSTAR MSR for patching guest code that touches the TPR. */
2498 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
2499 {
2500 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* EFER always up-to-date. */
2501 pMixedCtx->msrLSTAR = u8GuestTpr;
2502 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
2503 {
2504 /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
2505 if (fPendingIntr)
2506 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
2507 else
2508 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2509 }
2510 }
2511 }
2512
2513 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2514 }
2515 return rc;
2516}
2517
2518
2519/**
2520 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2521 *
2522 * @returns The guest's interruptibility-state.
2523 * @param pVCpu Pointer to the VMCPU.
2524 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2525 * out-of-sync. Make sure to update the required fields
2526 * before using them.
2527 *
2528 * @remarks No-long-jump zone!!!
2529 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2530 */
2531DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2532{
2533 /*
2534 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2535 * inhibit interrupts or clear any existing interrupt-inhibition.
2536 */
2537 uint32_t uIntrState = 0;
2538 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2539 {
2540 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2541 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2542 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
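        /* Keep the inhibition only while RIP still points at the instruction that set it up; then report STI
           vs. MOV SS blocking based on EFLAGS.IF. */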
2543 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2544 {
2545 /*
2546 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2547 * VT-x the flag's condition to be cleared is met and thus the cleared state is correct.
2548 */
2549 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2550 }
2551 else if (pMixedCtx->eflags.Bits.u1IF)
2552 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2553 else
2554 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2555 }
2556 return uIntrState;
2557}
2558
2559
2560/**
2561 * Loads the guest's interruptibility-state into the guest-state area in the
2562 * VMCS.
2563 *
2564 * @returns VBox status code.
2565 * @param pVCpu Pointer to the VMCPU.
2566 * @param uIntrState The interruptibility-state to set.
2567 */
2568static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2569{
2570 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2571 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2572 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2573 AssertRCReturn(rc, rc);
2574 return rc;
2575}
2576
2577
2578/**
2579 * Loads the guest's RIP into the guest-state area in the VMCS.
2580 *
2581 * @returns VBox status code.
2582 * @param pVCpu Pointer to the VMCPU.
2583 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2584 * out-of-sync. Make sure to update the required fields
2585 * before using them.
2586 *
2587 * @remarks No-long-jump zone!!!
2588 */
2589static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2590{
2591 int rc = VINF_SUCCESS;
2592 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2593 {
2594 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2595 AssertRCReturn(rc, rc);
2596 Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
2597 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2598 }
2599 return rc;
2600}
2601
2602
2603/**
2604 * Loads the guest's RSP into the guest-state area in the VMCS.
2605 *
2606 * @returns VBox status code.
2607 * @param pVCpu Pointer to the VMCPU.
2608 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2609 * out-of-sync. Make sure to update the required fields
2610 * before using them.
2611 *
2612 * @remarks No-long-jump zone!!!
2613 */
2614static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2615{
2616 int rc = VINF_SUCCESS;
2617 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2618 {
2619 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2620 AssertRCReturn(rc, rc);
2621 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2622 }
2623 return rc;
2624}
2625
2626
2627/**
2628 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2629 *
2630 * @returns VBox status code.
2631 * @param pVCpu Pointer to the VMCPU.
2632 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2633 * out-of-sync. Make sure to update the required fields
2634 * before using them.
2635 *
2636 * @remarks No-long-jump zone!!!
2637 */
2638static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2639{
2640 int rc = VINF_SUCCESS;
2641 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2642 {
2643 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2644 Let us assert it as such and use 32-bit VMWRITE. */
2645 Assert(!(pMixedCtx->rflags.u64 >> 32));
2646 X86EFLAGS uEFlags = pMixedCtx->eflags;
2647 uEFlags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2648 uEFlags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2649
2650 /*
2651 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2652 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2653 */
2654 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2655 {
2656 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2657 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2658 pVCpu->hm.s.vmx.RealMode.eflags.u32 = uEFlags.u32; /* Save the original eflags of the real-mode guest. */
2659 uEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2660 uEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2661 }
2662
2663 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, uEFlags.u32);
2664 AssertRCReturn(rc, rc);
2665
2666 Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", uEFlags.u32));
2667 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2668 }
2669 return rc;
2670}
2671
2672
2673/**
2674 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2675 *
2676 * @returns VBox status code.
2677 * @param pVCpu Pointer to the VMCPU.
2678 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2679 * out-of-sync. Make sure to update the required fields
2680 * before using them.
2681 *
2682 * @remarks No-long-jump zone!!!
2683 */
2684static int hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2685{
2686 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2687 rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2688 rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2689 return rc;
2690}
2691
2692
2693/**
2694 * Loads the guest control registers (CR0, CR3, CR4) into the guest-state area
2695 * in the VMCS.
2696 *
2697 * @returns VBox status code.
2698 * @param   pVCpu       Pointer to the VMCPU.
2699 * @param   pCtx        Pointer to the guest-CPU context. The data may be
2700 *                      out-of-sync. Make sure to update the required fields
2701 *                      before using them.
2703 *
2704 * @remarks No-long-jump zone!!!
2705 */
2706static int hmR0VmxLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
2707{
2708 int rc = VINF_SUCCESS;
2709 PVM pVM = pVCpu->CTX_SUFF(pVM);
2710
2711 /*
2712 * Guest CR0.
2713 * Guest FPU.
2714 */
2715 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2716 {
2717 Assert(!(pCtx->cr0 >> 32));
2718 uint32_t u32GuestCR0 = pCtx->cr0;
2719
2720 /* The guest's view (read access) of its CR0 is unblemished. */
2721 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2722 AssertRCReturn(rc, rc);
2723 Log(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2724
2725 /* Setup VT-x's view of the guest CR0. */
2726 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2727 if (pVM->hm.s.fNestedPaging)
2728 {
2729 if (CPUMIsGuestPagingEnabledEx(pCtx))
2730 {
2731 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
2732 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
2733 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
2734 }
2735 else
2736 {
2737                /* The guest doesn't have paging enabled; make CR3 accesses cause VM-exits so we can update our shadow page tables. */
2738 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
2739 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
2740 }
2741
2742 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2743 AssertRCReturn(rc, rc);
2744 }
2745 else
2746 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a VM-exit. */
2747
2748 /*
2749 * Guest FPU bits.
2750         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
2751         * CPUs to support VT-x; nothing is said about relaxing this for UX in the VM-entry checks.
2752 */
2753 u32GuestCR0 |= X86_CR0_NE;
2754 bool fInterceptNM = false;
2755 if (CPUMIsGuestFPUStateActive(pVCpu))
2756 {
2757 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
2758            /* The guest should still get #NM exceptions when it expects them, so we should not clear TS & MP bits here.
2759 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
2760 }
2761 else
2762 {
2763 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
2764 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
2765 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
2766 }
2767
2768 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
2769 bool fInterceptMF = false;
2770 if (!(pCtx->cr0 & X86_CR0_NE))
2771 fInterceptMF = true;
2772
2773 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
2774 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2775 {
2776 Assert(PDMVmmDevHeapIsEnabled(pVM));
2777 Assert(pVM->hm.s.vmx.pRealModeTSS);
2778 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
2779 fInterceptNM = true;
2780 fInterceptMF = true;
2781 }
2782 else
2783 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
2784
2785 if (fInterceptNM)
2786 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
2787 else
2788 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
2789
2790 if (fInterceptMF)
2791 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
2792 else
2793 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
2794
2795 /* Additional intercepts for debugging, define these yourself explicitly. */
2796#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
2797 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_BP)
2798 | RT_BIT(X86_XCPT_DB)
2799 | RT_BIT(X86_XCPT_DE)
2800 | RT_BIT(X86_XCPT_NM)
2801 | RT_BIT(X86_XCPT_UD)
2802 | RT_BIT(X86_XCPT_NP)
2803 | RT_BIT(X86_XCPT_SS)
2804 | RT_BIT(X86_XCPT_GP)
2805 | RT_BIT(X86_XCPT_PF)
2806 | RT_BIT(X86_XCPT_MF);
2807#elif defined(HMVMX_ALWAYS_TRAP_PF)
2808 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2809#endif
2810
2811 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
2812
2813 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
2814 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2815 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
2816 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
2817 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
2818 else
2819 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
2820
2821 u32GuestCR0 |= uSetCR0;
2822 u32GuestCR0 &= uZapCR0;
2823 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
2824
2825 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
2826 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
2827 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
2828 Log(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
2829
2830 /*
2831 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
2832 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
2833 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
2834 */
2835 uint64_t u64CR0Mask = 0;
2836 u64CR0Mask = X86_CR0_PE
2837 | X86_CR0_NE
2838 | X86_CR0_WP
2839 | X86_CR0_PG
2840 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
2841 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
2842 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
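        /* Unrestricted guests may toggle CR0.PE themselves, and with nested paging CR0.WP changes need no
           attention from us, so those bits are removed from the ownership mask below. */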
2843 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2844 u64CR0Mask &= ~X86_CR0_PE;
2845 if (pVM->hm.s.fNestedPaging)
2846 u64CR0Mask &= ~X86_CR0_WP;
2847
2848 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
2849 if (fInterceptNM)
2850 u64CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
2851 else
2852 u64CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
2853
2854 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
2855 pVCpu->hm.s.vmx.cr0_mask = u64CR0Mask;
2856 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64CR0Mask);
2857 AssertRCReturn(rc, rc);
2858
2859 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
2860 }
2861
2862 /*
2863 * Guest CR2.
2864 * It's always loaded in the assembler code. Nothing to do here.
2865 */
2866
2867 /*
2868 * Guest CR3.
2869 */
2870 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
2871 {
2872 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
2873 if (pVM->hm.s.fNestedPaging)
2874 {
2875 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
2876
2877 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
2878 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
2879 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
2880 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
2881
2882 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
2883 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
2884 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
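             /* With the values used here (write-back memory type, and a 4-level EPT page walk so the
                walk-length-minus-1 field is 3, as the assert below checks) the low 12 bits of the EPTP
                come out as 0x01e; the upper bits hold the 4K-aligned host-physical address of the EPT
                PML4 table. */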
2885
2886 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
2887 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
2888 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
2889 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
2890
2891 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
2892 AssertRCReturn(rc, rc);
2893 Log(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
2894
2895 if ( pVM->hm.s.vmx.fUnrestrictedGuest
2896 || CPUMIsGuestPagingEnabledEx(pCtx))
2897 {
2898 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
2899 if (CPUMIsGuestInPAEModeEx(pCtx))
2900 {
2901 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
2902 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
2903 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
2904 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
2905 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
2906 AssertRCReturn(rc, rc);
2907 }
2908
 2909 /* With Nested Paging the guest's view of its CR3 is unblemished: either the guest is using paging, or we
 2910 have Unrestricted Execution to handle the guest while it's not using paging. */
2911 GCPhysGuestCR3 = pCtx->cr3;
2912 }
2913 else
2914 {
2915 /*
2916 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
2917 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
2918 * EPT takes care of translating it to host-physical addresses.
2919 */
2920 RTGCPHYS GCPhys;
2921 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
2922 Assert(PDMVmmDevHeapIsEnabled(pVM));
2923
2924 /* We obtain it here every time as the guest could have relocated this PCI region. */
2925 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2926 AssertRCReturn(rc, rc);
2927
2928 GCPhysGuestCR3 = GCPhys;
2929 }
2930 }
2931 else
2932 {
2933 /* Non-nested paging case, just use the hypervisor's CR3. */
2934 GCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
2935 }
2936
 2937 Log(("Load: VMX_VMCS_GUEST_CR3=%#RGp\n", GCPhysGuestCR3));
2938 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
2939 AssertRCReturn(rc, rc);
2940
2941 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
2942 }
2943
2944 /*
2945 * Guest CR4.
2946 */
2947 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
2948 {
2949 Assert(!(pCtx->cr4 >> 32));
2950 uint32_t u32GuestCR4 = pCtx->cr4;
2951
2952 /* The guest's view of its CR4 is unblemished. */
2953 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
2954 AssertRCReturn(rc, rc);
2955 Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
2956
2957 /* Setup VT-x's view of the guest CR4. */
2958 /*
2959 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
2960 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
2961 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
2962 */
2963 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2964 {
2965 Assert(pVM->hm.s.vmx.pRealModeTSS);
2966 Assert(PDMVmmDevHeapIsEnabled(pVM));
2967 u32GuestCR4 &= ~X86_CR4_VME;
2968 }
2969
2970 if (pVM->hm.s.fNestedPaging)
2971 {
2972 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
2973 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2974 {
2975 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
2976 u32GuestCR4 |= X86_CR4_PSE;
 2977 /* Our identity mapping is a 32-bit page directory. */
2978 u32GuestCR4 &= ~X86_CR4_PAE;
2979 }
2980 /* else use guest CR4.*/
2981 }
2982 else
2983 {
2984 /*
 2985 * The shadow and guest paging modes differ: the shadow follows the host paging mode, so we need to
 2986 * adjust VT-x's view of CR4 according to our shadow page tables.
2987 */
2988 switch (pVCpu->hm.s.enmShadowMode)
2989 {
2990 case PGMMODE_REAL: /* Real-mode. */
2991 case PGMMODE_PROTECTED: /* Protected mode without paging. */
2992 case PGMMODE_32_BIT: /* 32-bit paging. */
2993 {
2994 u32GuestCR4 &= ~X86_CR4_PAE;
2995 break;
2996 }
2997
2998 case PGMMODE_PAE: /* PAE paging. */
2999 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3000 {
3001 u32GuestCR4 |= X86_CR4_PAE;
3002 break;
3003 }
3004
3005 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3006 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3007#ifdef VBOX_ENABLE_64_BITS_GUESTS
3008 break;
3009#endif
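             /* Without VBOX_ENABLE_64_BITS_GUESTS the AMD64 cases above fall through to the default below
                and fail with VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE. */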
3010 default:
3011 AssertFailed();
3012 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3013 }
3014 }
3015
3016 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3017 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3018 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3019 u32GuestCR4 |= uSetCR4;
3020 u32GuestCR4 &= uZapCR4;
3021
3022 /* Write VT-x's view of the guest CR4 into the VMCS. */
3023 Log(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3024 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3025
 3026 /* Set up the CR4 mask. These CR4 bits are owned by the host; if the guest attempts to change them, a VM-exit occurs. */
3027 uint64_t u64CR4Mask = 0;
3028 u64CR4Mask = X86_CR4_VME
3029 | X86_CR4_PAE
3030 | X86_CR4_PGE
3031 | X86_CR4_PSE
3032 | X86_CR4_VMXE;
3033 pVCpu->hm.s.vmx.cr4_mask = u64CR4Mask;
3034 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64CR4Mask);
3035 AssertRCReturn(rc, rc);
3036
3037 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3038 }
3039 return rc;
3040}
3041
3042
3043/**
3044 * Loads the guest debug registers into the guest-state area in the VMCS.
3045 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
3046 *
3047 * @returns VBox status code.
3048 * @param pVCpu Pointer to the VMCPU.
3049 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3050 * out-of-sync. Make sure to update the required fields
3051 * before using them.
3052 *
3053 * @remarks No-long-jump zone!!!
3054 */
3055static int hmR0VmxLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3056{
3057 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3058 return VINF_SUCCESS;
3059
3060#ifdef VBOX_STRICT
3061 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3062 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
3063 {
3064 Assert(!(pMixedCtx->dr[7] >> 32)); /* upper 32 bits are reserved (MBZ). */
3065 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3066 Assert((pMixedCtx->dr[7] & 0xd800) == 0); /* bits 15, 14, 12, 11 are reserved (MBZ). */
3067 Assert((pMixedCtx->dr[7] & 0x400) == 0x400); /* bit 10 is reserved (MB1). */
3068 }
3069#endif
3070
3071 int rc = VERR_INTERNAL_ERROR_5;
3072 PVM pVM = pVCpu->CTX_SUFF(pVM);
3073 bool fInterceptDB = false;
3074 bool fInterceptMovDRx = false;
3075 if (DBGFIsStepping(pVCpu))
3076 {
3077 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3078 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
3079 {
3080 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
3081 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
3082 AssertRCReturn(rc, rc);
3083 Assert(fInterceptDB == false);
3084 }
3085 else
3086 fInterceptDB = true;
3087 }
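     /* Note: with the monitor trap flag set above, the CPU VM-exits after executing each guest instruction,
        which gives DBGF single-stepping without intercepting #DB or touching the guest's DR7. */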
3088
3089 if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3090 {
3091 if (!CPUMIsHyperDebugStateActive(pVCpu))
3092 {
3093 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3094 AssertRC(rc);
3095 }
3096 Assert(CPUMIsHyperDebugStateActive(pVCpu));
3097 fInterceptMovDRx = true;
3098 }
3099 else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
3100 {
3101 if (!CPUMIsGuestDebugStateActive(pVCpu))
3102 {
3103 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
3104 AssertRC(rc);
3105 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3106 }
3107 Assert(CPUMIsGuestDebugStateActive(pVCpu));
3108 Assert(fInterceptMovDRx == false);
3109 }
3110 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3111 {
 3112 /* Neither the hypervisor nor the guest debug state is active; intercept MOV DRx accesses so the guest debug state can be loaded lazily on first use. */
3113 fInterceptMovDRx = true;
3114 }
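     /*
      * Summary of the decisions above: an armed hypervisor DR7 loads the hyper debug state and intercepts
      * MOV DRx; an armed guest DR7 loads the guest debug state with no MOV DRx intercept; otherwise MOV DRx
      * is intercepted so the guest debug state can be loaded lazily when first accessed.
      */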
3115
3116 /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
3117 if (fInterceptDB)
3118 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3119 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3120 {
3121#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3122 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3123#endif
3124 }
3125
3126 /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
3127 if (fInterceptMovDRx)
3128 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
3129 else
3130 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
3131
3132 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3133 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
3134
3135 /* The guest's view of its DR7 is unblemished. */
3136 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
3137
3138 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3139 return rc;
3140}
3141
3142
3143#ifdef VBOX_STRICT
3144/**
3145 * Strict function to validate segment registers.
3146 *
3147 * @remarks Requires CR0.
3148 */
3149static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3150{
3151 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3152 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
3153 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
3154 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3155 && ( !CPUMIsGuestInRealModeEx(pCtx)
3156 && !CPUMIsGuestInV86ModeEx(pCtx)))
3157 {
3158 /* Protected mode checks */
3159 /* CS */
3160 Assert(pCtx->cs.Attr.n.u1Present);
3161 Assert(!(pCtx->cs.Attr.u & 0xf00));
3162 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3163 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3164 || !(pCtx->cs.Attr.n.u1Granularity));
3165 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3166 || (pCtx->cs.Attr.n.u1Granularity));
3167 Assert(pCtx->cs.Attr.u && pCtx->cs.Attr.u != HMVMX_SEL_UNUSABLE); /* CS cannot be loaded with NULL in protected mode. */
3168 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3169 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3170 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3171 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3172 else
 3173 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3174 /* SS */
3175 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3176 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3177 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
3178 if ( !(pCtx->cr0 & X86_CR0_PE)
3179 || pCtx->cs.Attr.n.u4Type == 3)
3180 {
3181 Assert(!pCtx->ss.Attr.n.u2Dpl);
3182 }
3183 if (pCtx->ss.Attr.u && pCtx->ss.Attr.u != HMVMX_SEL_UNUSABLE)
3184 {
3185 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3186 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3187 Assert(pCtx->ss.Attr.n.u1Present);
3188 Assert(!(pCtx->ss.Attr.u & 0xf00));
3189 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3190 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3191 || !(pCtx->ss.Attr.n.u1Granularity));
3192 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3193 || (pCtx->ss.Attr.n.u1Granularity));
3194 }
3195 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3196 if (pCtx->ds.Attr.u && pCtx->ds.Attr.u != HMVMX_SEL_UNUSABLE)
3197 {
3198 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3199 Assert(pCtx->ds.Attr.n.u1Present);
3200 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3201 Assert(!(pCtx->ds.Attr.u & 0xf00));
3202 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3203 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3204 || !(pCtx->ds.Attr.n.u1Granularity));
3205 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3206 || (pCtx->ds.Attr.n.u1Granularity));
3207 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3208 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3209 }
3210 if (pCtx->es.Attr.u && pCtx->es.Attr.u != HMVMX_SEL_UNUSABLE)
3211 {
3212 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3213 Assert(pCtx->es.Attr.n.u1Present);
3214 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3215 Assert(!(pCtx->es.Attr.u & 0xf00));
3216 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3217 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3218 || !(pCtx->es.Attr.n.u1Granularity));
3219 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3220 || (pCtx->es.Attr.n.u1Granularity));
3221 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3222 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3223 }
3224 if (pCtx->fs.Attr.u && pCtx->fs.Attr.u != HMVMX_SEL_UNUSABLE)
3225 {
3226 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3227 Assert(pCtx->fs.Attr.n.u1Present);
3228 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3229 Assert(!(pCtx->fs.Attr.u & 0xf00));
3230 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3231 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3232 || !(pCtx->fs.Attr.n.u1Granularity));
3233 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3234 || (pCtx->fs.Attr.n.u1Granularity));
3235 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3236 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3237 }
3238 if (pCtx->gs.Attr.u && pCtx->gs.Attr.u != HMVMX_SEL_UNUSABLE)
3239 {
3240 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3241 Assert(pCtx->gs.Attr.n.u1Present);
3242 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3243 Assert(!(pCtx->gs.Attr.u & 0xf00));
3244 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3245 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3246 || !(pCtx->gs.Attr.n.u1Granularity));
3247 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3248 || (pCtx->gs.Attr.n.u1Granularity));
3249 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3250 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3251 }
3252 /* 64-bit capable CPUs. */
3253# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3254 Assert(!(pCtx->cs.u64Base >> 32));
3255 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3256 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3257 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3258# endif
3259 }
3260 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3261 || ( CPUMIsGuestInRealModeEx(pCtx)
3262 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3263 {
3264 /* Real and v86 mode checks. */
 3265 /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS; validate what we're actually feeding to VT-x. */
3266 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3267 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3268 {
3269 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3270 }
3271 else
3272 {
3273 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3274 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3275 }
3276
3277 /* CS */
 3278 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3279 Assert(pCtx->cs.u32Limit == 0xffff);
3280 Assert(u32CSAttr == 0xf3);
3281 /* SS */
3282 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3283 Assert(pCtx->ss.u32Limit == 0xffff);
3284 Assert(u32SSAttr == 0xf3);
3285 /* DS */
3286 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3287 Assert(pCtx->ds.u32Limit == 0xffff);
3288 Assert(u32DSAttr == 0xf3);
3289 /* ES */
3290 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3291 Assert(pCtx->es.u32Limit == 0xffff);
3292 Assert(u32ESAttr == 0xf3);
3293 /* FS */
3294 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3295 Assert(pCtx->fs.u32Limit == 0xffff);
3296 Assert(u32FSAttr == 0xf3);
3297 /* GS */
3298 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3299 Assert(pCtx->gs.u32Limit == 0xffff);
3300 Assert(u32GSAttr == 0xf3);
3301 /* 64-bit capable CPUs. */
3302# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3303 Assert(!(pCtx->cs.u64Base >> 32));
3304 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3305 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3306 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3307# endif
3308 }
3309}
3310#endif /* VBOX_STRICT */
3311
3312
3313/**
3314 * Writes a guest segment register into the guest-state area in the VMCS.
3315 *
3316 * @returns VBox status code.
3317 * @param pVCpu Pointer to the VMCPU.
3318 * @param idxSel Index of the selector in the VMCS.
3319 * @param idxLimit Index of the segment limit in the VMCS.
3320 * @param idxBase Index of the segment base in the VMCS.
3321 * @param idxAccess Index of the access rights of the segment in the VMCS.
3322 * @param pSelReg Pointer to the segment selector.
3323 * @param pCtx Pointer to the guest-CPU context.
3324 *
3325 * @remarks No-long-jump zone!!!
3326 */
3327static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3328 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3329{
3330 int rc;
3331 rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3332 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3333 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3334 AssertRCReturn(rc, rc);
3335
3336 uint32_t u32Access = pSelReg->Attr.u;
3337 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3338 {
 3339 /* VT-x requires our real-on-v86 mode hack to override the segment access-right bits. */
3340 u32Access = 0xf3;
3341 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3342 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3343 }
3344 else
3345 {
3346 /*
 3347 * The segment attributes let us differentiate a real null selector from a selector that was merely loaded with 0 in
 3348 * real-mode. A selector loaded in real-mode with the value 0 is valid and usable in protected-mode and we should
 3349 * -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors loaded in
 3350 * protected-mode have their attributes set to 0.
3351 */
3352 if (!u32Access)
3353 u32Access = HMVMX_SEL_UNUSABLE;
3354 }
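     /*
      * Example: a segment register that was loaded with 0 while in real-mode still carries non-zero
      * attributes and is written unchanged, whereas a NULL selector loaded in protected-mode has
      * Attr.u == 0 and gets the HMVMX_SEL_UNUSABLE bit set so the VM-entry checks accept it.
      */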
3355
3356 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3357 AssertMsg((u32Access == HMVMX_SEL_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
 3358 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
3359
3360 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3361 AssertRCReturn(rc, rc);
3362 return rc;
3363}
3364
3365
3366/**
3367 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3368 * into the guest-state area in the VMCS.
3369 *
3370 * @returns VBox status code.
 3371 * @param pVCpu Pointer to the VMCPU.
3373 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3374 * out-of-sync. Make sure to update the required fields
3375 * before using them.
3376 *
3377 * @remarks Requires CR0 (strict builds validation).
3378 * @remarks No-long-jump zone!!!
3379 */
3380static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3381{
3382 int rc = VERR_INTERNAL_ERROR_5;
3383 PVM pVM = pVCpu->CTX_SUFF(pVM);
3384
3385 /*
3386 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3387 */
3388 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3389 {
3390 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3391 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3392 {
3393 pVCpu->hm.s.vmx.RealMode.uAttrCS.u = pMixedCtx->cs.Attr.u;
3394 pVCpu->hm.s.vmx.RealMode.uAttrSS.u = pMixedCtx->ss.Attr.u;
3395 pVCpu->hm.s.vmx.RealMode.uAttrDS.u = pMixedCtx->ds.Attr.u;
3396 pVCpu->hm.s.vmx.RealMode.uAttrES.u = pMixedCtx->es.Attr.u;
3397 pVCpu->hm.s.vmx.RealMode.uAttrFS.u = pMixedCtx->fs.Attr.u;
3398 pVCpu->hm.s.vmx.RealMode.uAttrGS.u = pMixedCtx->gs.Attr.u;
3399 }
3400
3401#ifdef VBOX_WITH_REM
3402 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3403 {
3404 Assert(pVM->hm.s.vmx.pRealModeTSS);
3405 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3406 if ( pVCpu->hm.s.vmx.fWasInRealMode
3407 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3408 {
3409 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3410 in real-mode (e.g. OpenBSD 4.0) */
3411 REMFlushTBs(pVM);
3412 Log(("Load: Switch to protected mode detected!\n"));
3413 pVCpu->hm.s.vmx.fWasInRealMode = false;
3414 }
3415 }
3416#endif
3417 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3418 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3419 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3420 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3421 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3422 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3423 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3424 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3425 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3426 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3427 rc |= hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3428 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3429 AssertRCReturn(rc, rc);
3430
3431#ifdef VBOX_STRICT
3432 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3433#endif
3434 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3435 }
3436
3437 /*
3438 * Guest TR.
3439 */
3440 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3441 {
3442 /*
3443 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3444 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3445 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3446 */
3447 uint16_t u16Sel = 0;
3448 uint32_t u32Limit = 0;
3449 uint64_t u64Base = 0;
3450 uint32_t u32AccessRights = 0;
3451
3452 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3453 {
3454 u16Sel = pMixedCtx->tr.Sel;
3455 u32Limit = pMixedCtx->tr.u32Limit;
3456 u64Base = pMixedCtx->tr.u64Base;
3457 u32AccessRights = pMixedCtx->tr.Attr.u;
3458 }
3459 else
3460 {
3461 Assert(pVM->hm.s.vmx.pRealModeTSS);
3462 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3463
3464 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3465 RTGCPHYS GCPhys;
3466 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3467 AssertRCReturn(rc, rc);
3468
3469 X86DESCATTR DescAttr;
3470 DescAttr.u = 0;
3471 DescAttr.n.u1Present = 1;
3472 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3473
3474 u16Sel = 0;
3475 u32Limit = HM_VTX_TSS_SIZE;
3476 u64Base = GCPhys; /* in real-mode phys = virt. */
3477 u32AccessRights = DescAttr.u;
3478 }
3479
3480 /* Validate. */
3481 Assert(!(u16Sel & RT_BIT(2)));
3482 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3483 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3484 AssertMsg(!(u32AccessRights & HMVMX_SEL_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3485 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3486 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3487 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3488 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3489 Assert( (u32Limit & 0xfff) == 0xfff
3490 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3491 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3492 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3493
3494 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel);
3495 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
3496 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
3497 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
3498 AssertRCReturn(rc, rc);
3499
3500 Log(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3501 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3502 }
3503
3504 /*
3505 * Guest GDTR.
3506 */
3507 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3508 {
3509 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
3510 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
3511 AssertRCReturn(rc, rc);
3512
3513 Assert(!(pMixedCtx->gdtr.cbGdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3514 Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3515 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3516 }
3517
3518 /*
3519 * Guest LDTR.
3520 */
3521 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3522 {
 3523 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
3524 uint32_t u32Access = 0;
3525 if (!pMixedCtx->ldtr.Attr.u)
3526 u32Access = HMVMX_SEL_UNUSABLE;
3527 else
3528 u32Access = pMixedCtx->ldtr.Attr.u;
3529
3530 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel);
3531 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
3532 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
3533 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
3534 AssertRCReturn(rc, rc);
3535
3536 /* Validate. */
3537 if (!(u32Access & HMVMX_SEL_UNUSABLE))
3538 {
3539 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3540 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3541 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3542 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3543 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3544 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3545 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3546 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3547 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3548 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3549 }
3550
3551 Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3552 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3553 }
3554
3555 /*
3556 * Guest IDTR.
3557 */
3558 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3559 {
3560 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
3561 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
3562 AssertRCReturn(rc, rc);
3563
3564 Assert(!(pMixedCtx->idtr.cbIdt & UINT64_C(0xffff0000))); /* Bits 31:16 MBZ. */
3565 Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3566 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3567 }
3568
3569 return VINF_SUCCESS;
3570}
3571
3572
3573/**
3574 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3575 * areas. These MSRs will automatically be loaded to the host CPU on every
3576 * successful VM entry and stored from the host CPU on every successful VM exit.
3577 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3578 *
3579 * @returns VBox status code.
3580 * @param pVCpu Pointer to the VMCPU.
3581 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3582 * out-of-sync. Make sure to update the required fields
3583 * before using them.
3584 *
3585 * @remarks No-long-jump zone!!!
3586 */
3587static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3588{
3589 AssertPtr(pVCpu);
3590 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3591
3592 /*
 3593 * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, KERNEL_GS_BASE, TSC_AUX (RDTSCP).
3594 */
3595 int rc = VINF_SUCCESS;
3596 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3597 {
3598#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3599 PVM pVM = pVCpu->CTX_SUFF(pVM);
3600 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3601 uint32_t cGuestMsrs = 0;
3602
3603 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3604 const bool fSupportsNX = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
3605 const bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3606 if (fSupportsNX || fSupportsLongMode)
3607 {
3608 /** @todo support save IA32_EFER, i.e.
3609 * VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR, in which case the
3610 * guest EFER need not be part of the VM-entry MSR-load area. */
3611 pGuestMsr->u32IndexMSR = MSR_K6_EFER;
3612 pGuestMsr->u32Reserved = 0;
3613 pGuestMsr->u64Value = pMixedCtx->msrEFER;
3614 /* VT-x will complain if only MSR_K6_EFER_LME is set. See Intel spec. 26.4 "Loading MSRs" for details. */
3615 if (!CPUMIsGuestInLongModeEx(pMixedCtx))
3616 pGuestMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
3617 pGuestMsr++; cGuestMsrs++;
3618 if (fSupportsLongMode)
3619 {
3620 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3621 pGuestMsr->u32Reserved = 0;
3622 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */
3623 pGuestMsr++; cGuestMsrs++;
3624 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3625 pGuestMsr->u32Reserved = 0;
3626 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3627 pGuestMsr++; cGuestMsrs++;
3628 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3629 pGuestMsr->u32Reserved = 0;
3630 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3631 pGuestMsr++; cGuestMsrs++;
3632 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3633 pGuestMsr->u32Reserved = 0;
3634 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3635 pGuestMsr++; cGuestMsrs++;
3636 }
3637 }
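         /* Each entry appended above is one VMXMSR record (u32IndexMSR, u32Reserved, u64Value); the same
            cGuestMsrs count is written to both the VM-entry MSR-load and VM-exit MSR-store counts further
            below, so these guest values are loaded on every VM-entry and stored back on every VM-exit. */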
3638
3639 /*
3640 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3641 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3642 */
3643 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3644 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3645 {
3646 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3647 pGuestMsr->u32Reserved = 0;
3648 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3649 AssertRCReturn(rc, rc);
3650 pGuestMsr++; cGuestMsrs++;
3651 }
3652
3653 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
3654 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3655 {
3656 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3657 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3658 }
3659
3660 /* Update the VCPU's copy of the guest MSR count. */
3661 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3662 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);
3663 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
3664 AssertRCReturn(rc, rc);
3665#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3666
3667 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3668 }
3669
3670 /*
3671 * Guest Sysenter MSRs.
 3672 * These flags are only set when MSR-bitmaps are not supported by the CPU, in which case we cause
 3673 * VM-exits on WRMSR to these MSRs.
3674 */
3675 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
3676 {
3677 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
3678 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
3679 }
3680 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
3681 {
3682 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
3683 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
3684 }
3685 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
3686 {
3687 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
3688 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
3689 }
3690 AssertRCReturn(rc, rc);
3691
3692 return rc;
3693}
3694
3695
3696/**
3697 * Loads the guest activity state into the guest-state area in the VMCS.
3698 *
3699 * @returns VBox status code.
3700 * @param pVCpu Pointer to the VMCPU.
3701 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3702 * out-of-sync. Make sure to update the required fields
3703 * before using them.
3704 *
3705 * @remarks No-long-jump zone!!!
3706 */
3707static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3708{
3709 /** @todo See if we can make use of other states, e.g.
3710 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
3711 int rc = VINF_SUCCESS;
3712 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
3713 {
3714 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3715 AssertRCReturn(rc, rc);
3716 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
3717 }
3718 return rc;
3719}
3720
3721
3722/**
3723 * Sets up the appropriate function to run guest code.
3724 *
3725 * @returns VBox status code.
3726 * @param pVCpu Pointer to the VMCPU.
3727 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3728 * out-of-sync. Make sure to update the required fields
3729 * before using them.
3730 *
3731 * @remarks No-long-jump zone!!!
3732 */
3733static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3734{
3735 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3736 {
3737#ifndef VBOX_ENABLE_64_BITS_GUESTS
3738 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3739#endif
3740 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
3741#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3742 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
3743 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
3744#else
3745 /* 64-bit host or hybrid host. */
3746 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
3747#endif
3748 }
3749 else
3750 {
3751 /* Guest is not in long mode, use the 32-bit handler. */
3752 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
3753 }
3754 Assert(pVCpu->hm.s.vmx.pfnStartVM);
3755 return VINF_SUCCESS;
3756}
3757
3758
3759/**
3760 * Wrapper for running the guest code in VT-x.
3761 *
3762 * @returns VBox strict status code.
3763 * @param pVM Pointer to the VM.
3764 * @param pVCpu Pointer to the VMCPU.
3765 * @param pCtx Pointer to the guest-CPU context.
3766 *
3767 * @remarks No-long-jump zone!!!
3768 */
3769DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3770{
3771 /*
3772 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
3773 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
 3774 * See the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
3775 */
3776#ifdef VBOX_WITH_KERNEL_USING_XMM
3777 return hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
3778#else
3779 return pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
3780#endif
3781}
3782
3783
3784/**
3785 * Report world-switch error and dump some useful debug info.
3786 *
3787 * @param pVM Pointer to the VM.
3788 * @param pVCpu Pointer to the VMCPU.
3789 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
3790 * @param pCtx Pointer to the guest-CPU context.
3791 * @param pVmxTransient Pointer to the VMX transient structure (only
3792 * exitReason updated).
3793 */
3794static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
3795{
3796 Assert(pVM);
3797 Assert(pVCpu);
3798 Assert(pCtx);
3799 Assert(pVmxTransient);
3800 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3801
3802 Log(("VM-entry failure: %Rrc\n", rcVMRun));
3803 switch (rcVMRun)
3804 {
3805 case VERR_VMX_INVALID_VMXON_PTR:
3806 AssertFailed();
3807 break;
3808 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
3809 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
3810 {
3811 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.lasterror.u32ExitReason);
3812 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.lasterror.u32InstrError);
3813 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
3814 AssertRC(rc);
3815
3816#ifdef VBOX_STRICT
3817 Log(("uExitReason %#x (VmxTransient %#x)\n", pVCpu->hm.s.vmx.lasterror.u32ExitReason,
3818 pVmxTransient->uExitReason));
3819 Log(("Exit Qualification %#x\n", pVmxTransient->uExitQualification));
3820 Log(("InstrError %#x\n", pVCpu->hm.s.vmx.lasterror.u32InstrError));
3821 if (pVCpu->hm.s.vmx.lasterror.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
3822 Log(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.lasterror.u32InstrError]));
3823 else
3824 Log(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
3825
3826 /* VMX control bits. */
3827 uint32_t u32Val;
3828 uint64_t u64Val;
3829 HMVMXHCUINTREG uHCReg;
3830 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &u32Val); AssertRC(rc);
3831 Log(("VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS %#RX32\n", u32Val));
3832 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &u32Val); AssertRC(rc);
3833 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS %#RX32\n", u32Val));
3834 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, &u32Val); AssertRC(rc);
3835 Log(("VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2 %#RX32\n", u32Val));
3836 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &u32Val); AssertRC(rc);
3837 Log(("VMX_VMCS32_CTRL_ENTRY_CONTROLS %#RX32\n", u32Val));
3838 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_CONTROLS, &u32Val); AssertRC(rc);
3839 Log(("VMX_VMCS32_CTRL_EXIT_CONTROLS %#RX32\n", u32Val));
3840 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
3841 Log(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
3842 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
3843 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
3844 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
3845 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
3846 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
3847 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
3848 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
3849 Log(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
3850 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
3851 Log(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
3852 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3853 Log(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
3854 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
3855 Log(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
3856 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
3857 Log(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
3858 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
3859 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
3860 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
3861 Log(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
3862 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
3863 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
3864 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
 3865 Log(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
3866 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
3867 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
3868 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
3869 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
3870 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
3871 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
3872
3873 /* Guest bits. */
3874 RTGCUINTREG uGCReg;
3875 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uGCReg); AssertRC(rc);
3876 Log(("Old Guest Rip %#RGv New %#RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)uGCReg));
3877 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uGCReg); AssertRC(rc);
3878 Log(("Old Guest Rsp %#RGv New %#RGv\n", (RTGCPTR)pCtx->rsp, (RTGCPTR)uGCReg));
3879 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
3880 Log(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
3881 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
3882 Log(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
3883
3884 /* Host bits. */
3885 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
3886 Log(("Host CR0 %#RHr\n", uHCReg));
3887 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
3888 Log(("Host CR3 %#RHr\n", uHCReg));
3889 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
3890 Log(("Host CR4 %#RHr\n", uHCReg));
3891
3892 RTGDTR HostGdtr;
3893 PCX86DESCHC pDesc;
3894 ASMGetGDTR(&HostGdtr);
3895 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val);
3896 Log(("Host CS %#08x\n", u32Val));
3897 if (u32Val < HostGdtr.cbGdt)
3898 {
3899 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3900 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
3901 }
3902
3903 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
3904 Log(("Host DS %#08x\n", u32Val));
3905 if (u32Val < HostGdtr.cbGdt)
3906 {
3907 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3908 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
3909 }
3910
3911 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
3912 Log(("Host ES %#08x\n", u32Val));
3913 if (u32Val < HostGdtr.cbGdt)
3914 {
3915 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3916 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
3917 }
3918
3919 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
3920 Log(("Host FS %#08x\n", u32Val));
3921 if (u32Val < HostGdtr.cbGdt)
3922 {
3923 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3924 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
3925 }
3926
3927 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
3928 Log(("Host GS %#08x\n", u32Val));
3929 if (u32Val < HostGdtr.cbGdt)
3930 {
3931 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3932 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
3933 }
3934
3935 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
3936 Log(("Host SS %#08x\n", u32Val));
3937 if (u32Val < HostGdtr.cbGdt)
3938 {
3939 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3940 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
3941 }
3942
3943 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
3944 Log(("Host TR %#08x\n", u32Val));
3945 if (u32Val < HostGdtr.cbGdt)
3946 {
3947 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
3948 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
3949 }
3950
3951 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
3952 Log(("Host TR Base %#RHv\n", uHCReg));
3953 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
3954 Log(("Host GDTR Base %#RHv\n", uHCReg));
3955 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
3956 Log(("Host IDTR Base %#RHv\n", uHCReg));
3957 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
3958 Log(("Host SYSENTER CS %#08x\n", u32Val));
3959 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
3960 Log(("Host SYSENTER EIP %#RHv\n", uHCReg));
3961 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
3962 Log(("Host SYSENTER ESP %#RHv\n", uHCReg));
3963 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
3964 Log(("Host RSP %#RHv\n", uHCReg));
3965 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
3966 Log(("Host RIP %#RHv\n", uHCReg));
3967# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3968 if (HMVMX_IS_64BIT_HOST_MODE())
3969 {
3970 Log(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
3971 Log(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
3972 Log(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
3973 Log(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
3974 Log(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
3975 Log(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
3976 }
3977# endif
3978#endif /* VBOX_STRICT */
3979 break;
3980 }
3981
3982 default:
3983 /* Impossible */
3984 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
3985 break;
3986 }
3987 NOREF(pVM);
3988}
3989
3990
3991#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3992#ifndef VMX_USE_CACHED_VMCS_ACCESSES
3993# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
3994#endif
3995#ifdef VBOX_STRICT
3996static bool hmR0VmxIsValidWriteField(uint32_t idxField)
3997{
3998 switch (idxField)
3999 {
4000 case VMX_VMCS_GUEST_RIP:
4001 case VMX_VMCS_GUEST_RSP:
4002 case VMX_VMCS_GUEST_DR7:
4003 case VMX_VMCS_GUEST_SYSENTER_EIP:
4004 case VMX_VMCS_GUEST_SYSENTER_ESP:
4005 case VMX_VMCS_GUEST_GDTR_BASE:
4006 case VMX_VMCS_GUEST_IDTR_BASE:
4007 case VMX_VMCS_GUEST_CS_BASE:
4008 case VMX_VMCS_GUEST_DS_BASE:
4009 case VMX_VMCS_GUEST_ES_BASE:
4010 case VMX_VMCS_GUEST_FS_BASE:
4011 case VMX_VMCS_GUEST_GS_BASE:
4012 case VMX_VMCS_GUEST_SS_BASE:
4013 case VMX_VMCS_GUEST_LDTR_BASE:
4014 case VMX_VMCS_GUEST_TR_BASE:
4015 case VMX_VMCS_GUEST_CR3:
4016 return true;
4017 }
4018 return false;
4019}
4020
4021static bool hmR0VmxIsValidReadField(uint32_t idxField)
4022{
4023 switch (idxField)
4024 {
4025 /* Read-only fields. */
4026 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4027 return true;
4028 }
4029 /* Remaining readable fields should also be writable. */
4030 return hmR0VmxIsValidWriteField(idxField);
4031}
4032#endif /* VBOX_STRICT */
4033
4034/**
4035 * Executes the specified handler in 64-bit mode.
4036 *
4037 * @returns VBox status code.
4038 * @param pVM Pointer to the VM.
4039 * @param pVCpu Pointer to the VMCPU.
4040 * @param pCtx Pointer to the guest CPU context.
4041 * @param enmOp The operation to perform.
4042 * @param cbParam Number of parameters.
4043 * @param paParam Array of 32-bit parameters.
4044 */
4045VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4046 uint32_t *paParam)
4047{
4048 int rc, rc2;
4049 PHMGLOBLCPUINFO pCpu;
4050 RTHCPHYS HCPhysCpuPage;
4051 RTCCUINTREG uOldEFlags;
4052
4053 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4054 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4055 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4056 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4057
4058#ifdef VBOX_STRICT
4059 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4060 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4061
 4062 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4063 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4064#endif
4065
4066 /* Disable interrupts. */
4067 uOldEFlags = ASMIntDisableFlags();
4068
4069#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4070 RTCPUID idHostCpu = RTMpCpuId();
4071 CPUMR0SetLApic(pVM, idHostCpu);
4072#endif
4073
4074 pCpu = HMR0GetCurrentCpu();
4075 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4076
 4077 /* Clear the VMCS. This marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
4078 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4079
4080 /* Leave VMX Root Mode. */
4081 VMXDisable();
4082
4083 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4084
4085 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4086 CPUMSetHyperEIP(pVCpu, enmOp);
4087 for (int i = (int)cbParam - 1; i >= 0; i--)
4088 CPUMPushHyper(pVCpu, paParam[i]);
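     /* The parameters are pushed in reverse order so that paParam[0] ends up on top of the hypervisor stack. */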
4089
4090 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4091
4092 /* Call the switcher. */
4093 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4094 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4095
4096 /** @todo replace with hmR0VmxEnterRootMode() and LeaveRootMode(). */
4097 /* Make sure the VMX instructions don't cause #UD faults. */
4098 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4099
4100 /* Re-enter VMX Root Mode */
4101 rc2 = VMXEnable(HCPhysCpuPage);
4102 if (RT_FAILURE(rc2))
4103 {
4104 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4105 ASMSetFlags(uOldEFlags);
4106 return rc2;
4107 }
4108
4109 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
4110 AssertRC(rc2);
4111 Assert(!(ASMGetFlags() & X86_EFL_IF));
4112 ASMSetFlags(uOldEFlags);
4113 return rc;
4114}
4115
4116
4117/**
 4118 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
4119 * supporting 64-bit guests.
4120 *
4121 * @returns VBox status code.
4122 * @param fResume Whether to VMLAUNCH or VMRESUME.
4123 * @param pCtx Pointer to the guest-CPU context.
4124 * @param pCache Pointer to the VMCS cache.
4125 * @param pVM Pointer to the VM.
4126 * @param pVCpu Pointer to the VMCPU.
4127 */
4128DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4129{
4130 uint32_t aParam[6];
4131 PHMGLOBLCPUINFO pCpu = NULL;
4132 RTHCPHYS HCPhysCpuPage = 0;
4133 int rc = VERR_INTERNAL_ERROR_5;
4134
4135 pCpu = HMR0GetCurrentCpu();
4136 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4137
4138#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4139 pCache->uPos = 1;
4140 pCache->interPD = PGMGetInterPaeCR3(pVM);
4141 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4142#endif
4143
4144#ifdef VBOX_STRICT
4145 pCache->TestIn.HCPhysCpuPage = 0;
4146 pCache->TestIn.HCPhysVmcs = 0;
4147 pCache->TestIn.pCache = 0;
4148 pCache->TestOut.HCPhysVmcs = 0;
4149 pCache->TestOut.pCache = 0;
4150 pCache->TestOut.pCtx = 0;
4151 pCache->TestOut.eflags = 0;
4152#endif
4153
4154 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4155 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4156 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4157 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4158 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4159 aParam[5] = 0;
4160
4161#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4162 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4163 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4164#endif
4165 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4166
4167#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4168 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4169 Assert(pCtx->dr[4] == 10);
4170 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4171#endif
4172
4173#ifdef VBOX_STRICT
4174 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4175 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4176 pVCpu->hm.s.vmx.HCPhysVmcs));
4177 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4178 pCache->TestOut.HCPhysVmcs));
4179 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4180 pCache->TestOut.pCache));
4181 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4182 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4183 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4184 pCache->TestOut.pCtx));
4185 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4186#endif
4187 return rc;
4188}
4189
4190
4191/**
4192 * Initialize the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
4193 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4194 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4195 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4196 *
4197 * @returns VBox status code.
4198 * @param pVM Pointer to the VM.
4199 * @param pVCpu Pointer to the VMCPU.
4200 */
4201static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4202{
4203#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4204{ \
4205 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4206 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4207 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4208 ++cReadFields; \
4209}
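/* For example, VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP) asserts that slot
   VMX_VMCS_GUEST_RIP_CACHE_IDX is still unused, records the VMX_VMCS_GUEST_RIP encoding in
   pCache->Read.aField[], zeroes the cached value and bumps cReadFields. */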
4210
4211 AssertPtr(pVM);
4212 AssertPtr(pVCpu);
4213 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4214 uint32_t cReadFields = 0;
4215
4216 /* Guest-natural selector base fields */
4217#if 0
4218 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4219 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4220 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4221#endif
4222 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4223 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4224 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4225 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4226 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4227 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4228 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4229 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4230 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4231 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4232 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DR7);
4233 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4234 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4235#if 0
4236 /* Unused natural width guest-state fields. */
4237 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4238 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4239#endif
4240 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4241 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4242
4243 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4244#if 0
4245 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4246 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4247 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4248 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4249 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4250 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4251 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4252 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4253 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4254#endif
4255
4256 /* Natural width guest-state fields. */
4257 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4258#if 0
4259 /* Currently unused field. */
4260 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4261#endif
4262
4263 if (pVM->hm.s.fNestedPaging)
4264 {
4265 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4266 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4267 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4268 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4269 }
4270 else
4271 {
4272 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4273 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4274 }
4275
4276#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4277 return VINF_SUCCESS;
4278}
4279
4280
4281/**
4282 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4283 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4284 * Darwin, running 64-bit guests).
4285 *
4286 * @returns VBox status code.
4287 * @param pVCpu Pointer to the VMCPU.
4288 * @param idxField The VMCS field encoding.
4289 * @param u64Val The 16-, 32- or 64-bit value to write.
4290 */
4291VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4292{
4293 int rc;
4294 switch (idxField)
4295 {
4296 /*
4297         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
4298 */
4299 /* 64-bit Control fields. */
4300 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4301 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4302 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4303 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4304 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4305 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4306 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4307 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4308 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4309 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4310 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4311 case VMX_VMCS64_CTRL_EPTP_FULL:
4312 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4313 /* 64-bit Guest-state fields. */
4314 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4315 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4316 case VMX_VMCS64_GUEST_PAT_FULL:
4317 case VMX_VMCS64_GUEST_EFER_FULL:
4318 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4319 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4320 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4321 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4322 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4323 /* 64-bit Host-state fields. */
4324 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4325 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4326 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4327 {
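            /* The "HIGH" half of a 64-bit VMCS field uses the encoding of the "FULL" half plus one, so write both 32-bit halves. */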
4328 rc = VMXWriteVmcs32(idxField, u64Val);
4329 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4330 break;
4331 }
4332
4333 /*
4334         * These fields do not have FULL and HIGH parts. Queue up the VMWRITE using the VMCS write-cache (for 64-bit
4335         * values); the queued VMWRITEs are executed later when we switch the host to 64-bit mode to run 64-bit guests.
4336 */
4337 /* Natural-width Guest-state fields. */
4338 case VMX_VMCS_GUEST_CR3:
4339 case VMX_VMCS_GUEST_ES_BASE:
4340 case VMX_VMCS_GUEST_CS_BASE:
4341 case VMX_VMCS_GUEST_SS_BASE:
4342 case VMX_VMCS_GUEST_DS_BASE:
4343 case VMX_VMCS_GUEST_FS_BASE:
4344 case VMX_VMCS_GUEST_GS_BASE:
4345 case VMX_VMCS_GUEST_LDTR_BASE:
4346 case VMX_VMCS_GUEST_TR_BASE:
4347 case VMX_VMCS_GUEST_GDTR_BASE:
4348 case VMX_VMCS_GUEST_IDTR_BASE:
4349 case VMX_VMCS_GUEST_DR7:
4350 case VMX_VMCS_GUEST_RSP:
4351 case VMX_VMCS_GUEST_RIP:
4352 case VMX_VMCS_GUEST_SYSENTER_ESP:
4353 case VMX_VMCS_GUEST_SYSENTER_EIP:
4354 {
4355 if (!(u64Val >> 32))
4356 {
4357 /* If this field is 64-bit, VT-x will zero out the top bits. */
4358 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4359 }
4360 else
4361 {
4362 /* Assert that only the 32->64 switcher case should ever come here. */
4363 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4364 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4365 }
4366 break;
4367 }
4368
4369 default:
4370 {
4371 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4372 rc = VERR_INVALID_PARAMETER;
4373 break;
4374 }
4375 }
4376 AssertRCReturn(rc, rc);
4377 return rc;
4378}
4379
4380
4381/**
4382 * Queues up a VMWRITE by using the VMCS write-cache. This is only used on 32-bit
4383 * hosts (except Darwin) for 64-bit guests.
4384 *
4385 * @param pVCpu Pointer to the VMCPU.
4386 * @param idxField The VMCS field encoding.
4387 * @param u64Val The 16-, 32- or 64-bit value to write.
4388 */
4389VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4390{
4391 AssertPtr(pVCpu);
4392 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4393
4394 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4395 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4396
4397 /* Make sure there are no duplicates. */
4398 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4399 {
4400 if (pCache->Write.aField[i] == idxField)
4401 {
4402 pCache->Write.aFieldVal[i] = u64Val;
4403 return VINF_SUCCESS;
4404 }
4405 }
4406
4407 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4408 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4409 pCache->Write.cValidEntries++;
4410 return VINF_SUCCESS;
4411}
4412
4413/* Enable later when the assembly code uses these as callbacks. */
4414#if 0
4415/**
4416 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4417 *
4418 * @param pVCpu Pointer to the VMCPU.
4419 * @param pCache Pointer to the VMCS cache.
4420 *
4421 * @remarks No-long-jump zone!!!
4422 */
4423VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4424{
4425 AssertPtr(pCache);
4426 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4427 {
4428 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4429 AssertRC(rc);
4430 }
4431 pCache->Write.cValidEntries = 0;
4432}
4433
4434
4435/**
4436 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4437 *
4438 * @param pVCpu Pointer to the VMCPU.
4439 * @param pCache Pointer to the VMCS cache.
4440 *
4441 * @remarks No-long-jump zone!!!
4442 */
4443VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4444{
4445 AssertPtr(pCache);
4446 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4447 {
4448 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4449 AssertRC(rc);
4450 }
4451}
4452#endif
4453#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4454
4455
4456/**
4457 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4458 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
4459 * timer.
4460 *
4462 * @param pVCpu Pointer to the VMCPU.
4463 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4464 * out-of-sync. Make sure to update the required fields
4465 * before using them.
4466 * @remarks No-long-jump zone!!!
4467 */
4468static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4469{
4470 int rc = VERR_INTERNAL_ERROR_5;
4471 bool fOffsettedTsc = false;
4472 PVM pVM = pVCpu->CTX_SUFF(pVM);
4473 if (pVM->hm.s.vmx.fUsePreemptTimer)
4474 {
4475 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4476
4477 /* Make sure the returned values have sane upper and lower boundaries. */
4478 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4479 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4480 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
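        /* The VMX-preemption timer counts down at the TSC rate right-shifted by the value the CPU reports in the
           IA32_VMX_MISC capability MSR (cPreemptTimerShift), so convert TSC ticks to preemption-timer ticks. */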
4481 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4482
4483 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4484 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4485 }
4486 else
4487 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4488
4489 if (fOffsettedTsc)
4490 {
4491 uint64_t u64CurTSC = ASMReadTSC();
4492 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4493 {
4494 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4495 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4496
4497 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4498 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4499 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4500 }
4501 else
4502 {
4503 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4504 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4505 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4506 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4507 }
4508 }
4509 else
4510 {
4511 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4512 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
4513 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4514 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4515 }
4516}
4517
4518
4519/**
4520 * Determines if an exception is a contributory exception. Contributory
4521 * exceptions are ones which can cause double-faults. Page-fault is
4522 * intentionally not included here as it's a conditional contributory exception.
4523 *
4524 * @returns true if the exception is contributory, false otherwise.
4525 * @param uVector The exception vector.
4526 */
4527DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4528{
4529 switch (uVector)
4530 {
4531 case X86_XCPT_GP:
4532 case X86_XCPT_SS:
4533 case X86_XCPT_NP:
4534 case X86_XCPT_TS:
4535 case X86_XCPT_DE:
4536 return true;
4537 default:
4538 break;
4539 }
4540 return false;
4541}
4542
4543
4544/**
4545 * Sets an event as a pending event to be injected into the guest.
4546 *
4547 * @param pVCpu Pointer to the VMCPU.
4548 * @param u32IntrInfo The VM-entry interruption-information field.
4549 * @param cbInstr The VM-entry instruction length in bytes (for software
4550 * interrupts, exceptions and privileged software
4551 * exceptions).
4552 * @param u32ErrCode The VM-entry exception error code.
4553 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4554 * page-fault.
4555 */
4556DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4557 RTGCUINTPTR GCPtrFaultAddress)
4558{
4559 Assert(!pVCpu->hm.s.Event.fPending);
4560 pVCpu->hm.s.Event.fPending = true;
4561 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4562 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4563 pVCpu->hm.s.Event.cbInstr = cbInstr;
4564 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4565}
4566
4567
4568/**
4569 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4570 *
4571 * @param pVCpu Pointer to the VMCPU.
4572 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4573 * out-of-sync. Make sure to update the required fields
4574 * before using them.
4575 */
4576DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4577{
4578 /* Inject the double-fault. */
4579 uint32_t u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
4580 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4581 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
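    /* The error code of a #DF is always zero, hence the 0 passed for u32ErrCode below. */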
4582 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4583}
4584
4585
4586/**
4587 * Handles a condition that occurred while delivering an event through the guest
4588 * IDT.
4589 *
4590 * @returns VBox status code (informational error codes included).
4591 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4592 * @retval VINF_VMX_DOUBLE_FAULT if a #DF condition was detected and we ought to
4593 * continue execution of the guest, which will deliver the #DF.
4594 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4595 *
4596 * @param pVCpu Pointer to the VMCPU.
4597 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4598 * out-of-sync. Make sure to update the required fields
4599 * before using them.
4600 * @param pVmxTransient Pointer to the VMX transient structure.
4601 *
4602 * @remarks No-long-jump zone!!!
4603 */
4604static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4605{
4606 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4607 AssertRC(rc);
4608 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4609 {
4610 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4611 AssertRCReturn(rc, rc);
4612
4613 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4614 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4615 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4616
4617 typedef enum
4618 {
4619 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4620 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4621 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4622 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4623 } VMXREFLECTXCPT;
4624
4625 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4626 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4627 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4628 {
4629 enmReflect = VMXREFLECTXCPT_XCPT;
4630#ifdef VBOX_STRICT
4631 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4632 && uExitVector == X86_XCPT_PF)
4633 {
4634 Log(("IDT: Contributory #PF uCR2=%#RGv\n", pMixedCtx->cr2));
4635 }
4636#endif
4637 if ( uExitVector == X86_XCPT_PF
4638 && uIdtVector == X86_XCPT_PF)
4639 {
4640 pVmxTransient->fVectoringPF = true;
4641 Log(("IDT: Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
4642 }
4643 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4644 && hmR0VmxIsContributoryXcpt(uExitVector)
4645 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4646 || uIdtVector == X86_XCPT_PF))
4647 {
4648 enmReflect = VMXREFLECTXCPT_DF;
4649 }
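            /* An exception raised while delivering a #DF means the guest has triple-faulted. */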
4650 else if (uIdtVector == X86_XCPT_DF)
4651 enmReflect = VMXREFLECTXCPT_TF;
4652 }
4653 else if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
4654 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
4655 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
4656 {
4657 /*
4658             * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
4659             * (whatever they are) as they reoccur when the instruction is restarted.
4660 */
4661 enmReflect = VMXREFLECTXCPT_XCPT;
4662 }
4663
4664 switch (enmReflect)
4665 {
4666 case VMXREFLECTXCPT_XCPT:
4667 {
4668 uint32_t u32ErrCode = 0;
4669 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
4670 {
4671 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
4672 AssertRCReturn(rc, rc);
4673 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
4674 }
4675
4676 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
4677 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
4678 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
4679 rc = VINF_SUCCESS;
4680 Log(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo,
4681 pVCpu->hm.s.Event.u32ErrCode));
4682 break;
4683 }
4684
4685 case VMXREFLECTXCPT_DF:
4686 {
4687 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
4688 rc = VINF_VMX_DOUBLE_FAULT;
4689 Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo,
4690 uIdtVector, uExitVector));
4691 break;
4692 }
4693
4694 case VMXREFLECTXCPT_TF:
4695 {
4696 Log(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
4697 rc = VINF_EM_RESET;
4698 break;
4699 }
4700
4701 default:
4702 Assert(rc == VINF_SUCCESS);
4703 break;
4704 }
4705 }
4706 Assert(rc == VINF_SUCCESS || rc == VINF_VMX_DOUBLE_FAULT || rc == VINF_EM_RESET);
4707 return rc;
4708}
4709
4710
4711/**
4712 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
4713 *
4714 * @returns VBox status code.
4715 * @param pVCpu Pointer to the VMCPU.
4716 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4717 * out-of-sync. Make sure to update the required fields
4718 * before using them.
4719 *
4720 * @remarks No-long-jump zone!!!
4721 */
4722static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4723{
4724 int rc = VINF_SUCCESS;
4725 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
4726 {
4727 uint32_t uVal = 0;
4728 uint32_t uShadow = 0;
4729 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
4730 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
4731 AssertRCReturn(rc, rc);
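        /* Reconstruct the guest's view of CR0: host-owned bits (those set in the CR0 guest/host mask) come from the
           read shadow, guest-owned bits from the VMCS CR0 value. */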
4732 uVal = (uShadow & pVCpu->hm.s.vmx.cr0_mask) | (uVal & ~pVCpu->hm.s.vmx.cr0_mask);
4733 CPUMSetGuestCR0(pVCpu, uVal);
4734 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
4735 }
4736 return rc;
4737}
4738
4739
4740/**
4741 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
4742 *
4743 * @returns VBox status code.
4744 * @param pVCpu Pointer to the VMCPU.
4745 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4746 * out-of-sync. Make sure to update the required fields
4747 * before using them.
4748 *
4749 * @remarks No-long-jump zone!!!
4750 */
4751static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4752{
4753 int rc = VINF_SUCCESS;
4754 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
4755 {
4756 uint32_t uVal = 0;
4757 uint32_t uShadow = 0;
4758 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
4759 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
4760 AssertRCReturn(rc, rc);
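        /* Same merging as for CR0: host-owned bits (CR4 guest/host mask) from the read shadow, the rest from the VMCS. */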
4761 uVal = (uShadow & pVCpu->hm.s.vmx.cr4_mask) | (uVal & ~pVCpu->hm.s.vmx.cr4_mask);
4762 CPUMSetGuestCR4(pVCpu, uVal);
4763 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
4764 }
4765 return rc;
4766}
4767
4768
4769/**
4770 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
4771 *
4772 * @returns VBox status code.
4773 * @param pVCpu Pointer to the VMCPU.
4774 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4775 * out-of-sync. Make sure to update the required fields
4776 * before using them.
4777 *
4778 * @remarks No-long-jump zone!!!
4779 */
4780static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4781{
4782 int rc = VINF_SUCCESS;
4783 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
4784 {
4785 RTGCUINTREG uVal = 0;
4786 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
4787 AssertRCReturn(rc, rc);
4788 pMixedCtx->rip = uVal;
4789 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
4790 }
4791 return rc;
4792}
4793
4794
4795/**
4796 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
4797 *
4798 * @returns VBox status code.
4799 * @param pVCpu Pointer to the VMCPU.
4800 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4801 * out-of-sync. Make sure to update the required fields
4802 * before using them.
4803 *
4804 * @remarks No-long-jump zone!!!
4805 */
4806static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4807{
4808 int rc = VINF_SUCCESS;
4809 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
4810 {
4811 RTGCUINTREG uVal = 0;
4812 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
4813 AssertRCReturn(rc, rc);
4814 pMixedCtx->rsp = uVal;
4815 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
4816 }
4817 return rc;
4818}
4819
4820
4821/**
4822 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
4823 *
4824 * @returns VBox status code.
4825 * @param pVCpu Pointer to the VMCPU.
4826 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4827 * out-of-sync. Make sure to update the required fields
4828 * before using them.
4829 *
4830 * @remarks No-long-jump zone!!!
4831 */
4832static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4833{
4834 int rc = VINF_SUCCESS;
4835 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
4836 {
4837 uint32_t uVal = 0;
4838 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
4839 AssertRCReturn(rc, rc);
4840 pMixedCtx->eflags.u32 = uVal;
4841
4842 /* Undo our real-on-v86-mode changes to eflags if necessary. */
4843 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4844 {
4845 PVM pVM = pVCpu->CTX_SUFF(pVM);
4846 Assert(pVM->hm.s.vmx.pRealModeTSS);
4847 Log(("Saving real-mode RFLAGS VT-x view=%#RX64\n", pMixedCtx->rflags.u64));
4848 pMixedCtx->eflags.Bits.u1VM = 0;
4849 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
4850 }
4851
4852 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
4853 }
4854 return rc;
4855}
4856
4857
4858/**
4859 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
4860 * guest-CPU context.
4861 */
4862DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4863{
4864 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
4865 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
4866 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
4867 return rc;
4868}
4869
4870
4871/**
4872 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
4873 * from the guest-state area in the VMCS.
4874 *
4875 * @param pVCpu Pointer to the VMCPU.
4876 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4877 * out-of-sync. Make sure to update the required fields
4878 * before using them.
4879 *
4880 * @remarks No-long-jump zone!!!
4881 */
4882static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4883{
4884 uint32_t uIntrState = 0;
4885 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
4886 AssertRC(rc);
4887
4888 if (!uIntrState)
4889 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4890 else
4891 {
4892 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
4893 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
4894 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
4895 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
4896 AssertRC(rc);
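        /* Remember the RIP at which the interrupt inhibition applies; the force-flag is dropped once RIP moves past it. */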
4897 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
4898 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
4899 }
4900}
4901
4902
4903/**
4904 * Saves the guest's activity state.
4905 *
4906 * @returns VBox status code.
4907 * @param pVCpu Pointer to the VMCPU.
4908 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4909 * out-of-sync. Make sure to update the required fields
4910 * before using them.
4911 *
4912 * @remarks No-long-jump zone!!!
4913 */
4914static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4915{
4916 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
4917 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
4918 return VINF_SUCCESS;
4919}
4920
4921
4922/**
4923 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
4924 * the current VMCS into the guest-CPU context.
4925 *
4926 * @returns VBox status code.
4927 * @param pVCpu Pointer to the VMCPU.
4928 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4929 * out-of-sync. Make sure to update the required fields
4930 * before using them.
4931 *
4932 * @remarks No-long-jump zone!!!
4933 */
4934static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4935{
4936 int rc = VINF_SUCCESS;
4937 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
4938 {
4939 uint32_t u32Val = 0;
4940 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
4941 pMixedCtx->SysEnter.cs = u32Val;
4942 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
4943 }
4944
4945 RTGCUINTREG uGCVal = 0;
4946 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
4947 {
4948 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &uGCVal); AssertRCReturn(rc, rc);
4949 pMixedCtx->SysEnter.eip = uGCVal;
4950 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
4951 }
4952 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
4953 {
4954 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &uGCVal); AssertRCReturn(rc, rc);
4955 pMixedCtx->SysEnter.esp = uGCVal;
4956 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
4957 }
4958 return rc;
4959}
4960
4961
4962/**
4963 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
4964 * context.
4965 *
4966 * @returns VBox status code.
4967 * @param pVCpu Pointer to the VMCPU.
4968 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4969 * out-of-sync. Make sure to update the required fields
4970 * before using them.
4971 *
4972 * @remarks No-long-jump zone!!!
4973 */
4974static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4975{
4976 int rc = VINF_SUCCESS;
4977 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
4978 {
4979 RTGCUINTREG uVal = 0;
4980 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &uVal); AssertRCReturn(rc, rc);
4981 pMixedCtx->fs.u64Base = uVal;
4982 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
4983 }
4984 return rc;
4985}
4986
4987
4988/**
4989 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
4990 * context.
4991 *
4992 * @returns VBox status code.
4993 * @param pVCpu Pointer to the VMCPU.
4994 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4995 * out-of-sync. Make sure to update the required fields
4996 * before using them.
4997 *
4998 * @remarks No-long-jump zone!!!
4999 */
5000static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5001{
5002 int rc = VINF_SUCCESS;
5003 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5004 {
5005 RTGCUINTREG uVal = 0;
5006 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &uVal); AssertRCReturn(rc, rc);
5007 pMixedCtx->gs.u64Base = uVal;
5008 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5009 }
5010 return rc;
5011}
5012
5013
5014/**
5015 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5016 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK and TSC_AUX.
5017 *
5018 * @returns VBox status code.
5019 * @param pVCpu Pointer to the VMCPU.
5020 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5021 * out-of-sync. Make sure to update the required fields
5022 * before using them.
5023 *
5024 * @remarks No-long-jump zone!!!
5025 */
5026static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5027{
5028 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5029 return VINF_SUCCESS;
5030
5031#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
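    /* Each entry in the auto-load/store MSR area is an {index, value} pair; the CPU stored the guest's values there on VM-exit. */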
5032 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5033 {
5034 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5035 pMsr += i;
5036 switch (pMsr->u32IndexMSR)
5037 {
5038 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5039 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5040 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5041 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5042 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5043 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5044 default:
5045 {
5046 AssertFailed();
5047 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5048 }
5049 }
5050 }
5051#endif
5052
5053 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5054 return VINF_SUCCESS;
5055}
5056
5057
5058/**
5059 * Saves the guest control registers from the current VMCS into the guest-CPU
5060 * context.
5061 *
5062 * @returns VBox status code.
5063 * @param pVCpu Pointer to the VMCPU.
5064 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5065 * out-of-sync. Make sure to update the required fields
5066 * before using them.
5067 *
5068 * @remarks No-long-jump zone!!!
5069 */
5070static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5071{
5072 /* Guest CR0. Guest FPU. */
5073 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5074
5075 /* Guest CR4. */
5076 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5077 AssertRCReturn(rc, rc);
5078
5079 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5080 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5081 {
5082 PVM pVM = pVCpu->CTX_SUFF(pVM);
5083 if ( pVM->hm.s.fNestedPaging
5084 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
5085 {
5086 RTGCUINTREG uVal = 0;
5087 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
5088 if (pMixedCtx->cr3 != uVal)
5089 {
5090 CPUMSetGuestCR3(pVCpu, uVal);
5091 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
5092 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5093 }
5094
5095 /* We require EFER to check PAE mode. */
5096 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5097
5098 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
5099 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR. */
5100 {
5101 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
5102 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
5103 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
5104 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
5105 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5106 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5107 }
5108 AssertRCReturn(rc, rc);
5109 }
5110
5111 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5112 }
5113 return rc;
5114}
5115
5116
5117/**
5118 * Reads a guest segment register from the current VMCS into the guest-CPU
5119 * context.
5120 *
5121 * @returns VBox status code.
5122 * @param pVCpu Pointer to the VMCPU.
5123 * @param idxSel Index of the selector in the VMCS.
5124 * @param idxLimit Index of the segment limit in the VMCS.
5125 * @param idxBase Index of the segment base in the VMCS.
5126 * @param idxAccess Index of the access rights of the segment in the VMCS.
5127 * @param pSelReg Pointer to the segment selector.
5128 *
5129 * @remarks No-long-jump zone!!!
5130 * @remarks Never call this function directly. Use the VMXLOCAL_READ_SEG() macro
5131 * as that takes care of whether to read from the VMCS cache or not.
5132 */
5133DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5134 PCPUMSELREG pSelReg)
5135{
5136 uint32_t u32Val = 0;
5137 int rc = VMXReadVmcs32(idxSel, &u32Val);
5138 pSelReg->Sel = (uint16_t)u32Val;
5139 pSelReg->ValidSel = (uint16_t)u32Val;
5140 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5141
5142 rc |= VMXReadVmcs32(idxLimit, &u32Val);
5143 pSelReg->u32Limit = u32Val;
5144
5145 RTGCUINTREG uGCVal = 0;
5146 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &uGCVal);
5147 pSelReg->u64Base = uGCVal;
5148
5149 rc |= VMXReadVmcs32(idxAccess, &u32Val);
5150 pSelReg->Attr.u = u32Val;
5151 AssertRCReturn(rc, rc);
5152
5153 /*
5154 * If VT-x marks the segment as unusable, the rest of the attributes are undefined.
5155 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5156 */
5157 if (pSelReg->Attr.u & HMVMX_SEL_UNUSABLE)
5158 {
5159 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR);
5160 /** @todo r=ramshankar: This can't be right for CS, SS which have exceptions for
5161 * certain bits, they're not all undefined. Consider ORing
5162 * HMVMX_SEL_UNUSABLE instead? */
5163 pSelReg->Attr.u = HMVMX_SEL_UNUSABLE;
5164 }
5165 return rc;
5166}
5167
5168
5169/**
5170 * Saves the guest segment registers from the current VMCS into the guest-CPU
5171 * context.
5172 *
5173 * @returns VBox status code.
5174 * @param pVCpu Pointer to the VMCPU.
5175 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5176 * out-of-sync. Make sure to update the required fields
5177 * before using them.
5178 *
5179 * @remarks No-long-jump zone!!!
5180 */
5181static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5182{
5183#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5184#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5185 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5186 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5187#else
5188#define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5189 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5190 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5191#endif
5192
5193 int rc = VINF_SUCCESS;
5194
5195 /* Guest segment registers. */
5196 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5197 {
5198 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5199 rc |= VMXLOCAL_READ_SEG(CS, cs);
5200 rc |= VMXLOCAL_READ_SEG(SS, ss);
5201 rc |= VMXLOCAL_READ_SEG(DS, ds);
5202 rc |= VMXLOCAL_READ_SEG(ES, es);
5203 rc |= VMXLOCAL_READ_SEG(FS, fs);
5204 rc |= VMXLOCAL_READ_SEG(GS, gs);
5205 AssertRCReturn(rc, rc);
5206
5207 /* Restore segment attributes for real-on-v86 mode hack. */
5208 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5209 {
5210 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrCS.u;
5211 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrSS.u;
5212 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrDS.u;
5213 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrES.u;
5214 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrFS.u;
5215 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.uAttrGS.u;
5216 }
5217 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5218 }
5219
5220 return rc;
5221}
5222
5223
5224/**
5225 * Saves the guest descriptor table registers and task register from the current
5226 * VMCS into the guest-CPU context.
5227 *
5228 * @returns VBox status code.
5229 * @param pVCpu Pointer to the VMCPU.
5230 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5231 * out-of-sync. Make sure to update the required fields
5232 * before using them.
5233 *
5234 * @remarks No-long-jump zone!!!
5235 */
5236static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5237{
5238 int rc = VINF_SUCCESS;
5239
5240 /* Guest LDTR. */
5241 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5242 {
5243 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5244 AssertRCReturn(rc, rc);
5245 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5246 }
5247
5248 /* Guest GDTR. */
5249 RTGCUINTREG uGCVal = 0;
5250 uint32_t u32Val = 0;
5251 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5252 {
5253 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal);
5254 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5255 pMixedCtx->gdtr.pGdt = uGCVal;
5256 pMixedCtx->gdtr.cbGdt = u32Val;
5257 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5258 }
5259
5260 /* Guest IDTR. */
5261 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5262 {
5263 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal);
5264 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5265 pMixedCtx->idtr.pIdt = uGCVal;
5266 pMixedCtx->idtr.cbIdt = u32Val;
5267 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5268 }
5269
5270 /* Guest TR. */
5271 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5272 {
5273 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5274
5275        /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
5276 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5277 rc |= VMXLOCAL_READ_SEG(TR, tr);
5278 AssertRCReturn(rc, rc);
5279 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5280 }
5281 return rc;
5282}
5283
5284
5285/**
5286 * Saves the guest debug registers from the current VMCS into the guest-CPU
5287 * context.
5288 *
5289 * @returns VBox status code.
5290 * @param pVCpu Pointer to the VMCPU.
5291 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5292 * out-of-sync. Make sure to update the required fields
5293 * before using them.
5294 *
5295 * @remarks No-long-jump zone!!!
5296 */
5297static int hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5298{
5299 int rc = VINF_SUCCESS;
5300 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5301 {
5302 RTGCUINTREG uVal;
5303 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_DR7, &uVal); AssertRCReturn(rc, rc);
5304 pMixedCtx->dr[7] = uVal;
5305
5306 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5307 }
5308 return rc;
5309}
5310
5311
5312/**
5313 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5314 *
5315 * @returns VBox status code.
5316 * @param pVCpu Pointer to the VMCPU.
5317 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5318 * out-of-sync. Make sure to update the required fields
5319 * before using them.
5320 *
5321 * @remarks No-long-jump zone!!!
5322 */
5323static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5324{
5325 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5326 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5327 return VINF_SUCCESS;
5328}
5329
5330
5331/**
5332 * Saves the entire guest state from the currently active VMCS into the
5333 * guest-CPU context. This essentially VMREADs all guest-data.
5334 *
5335 * @returns VBox status code.
5336 * @param pVCpu Pointer to the VMCPU.
5337 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5338 * out-of-sync. Make sure to update the required fields
5339 * before using them.
5340 */
5341static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5342{
5343 Assert(pVCpu);
5344 Assert(pMixedCtx);
5345
5346 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5347 return VINF_SUCCESS;
5348
5349 VMMRZCallRing3Disable(pVCpu);
5350 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5351 LogFunc(("\n"));
5352
5353 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5354 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5355
5356 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5357 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5358
5359 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5360 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5361
5362 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5363 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5364
5365 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
5366 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5367
5368 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5369 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5370
5371 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5372 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5373
5374 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5375 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5376
5377 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5378 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5379
5380 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5381 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5382
5383 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5384    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5385
5386 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5387 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5388
5389 VMMRZCallRing3Enable(pVCpu);
5390 return rc;
5391}
5392
5393
5394/**
5395 * Checks per-VM and per-VCPU force flag actions that require us to go back to
5396 * ring-3 for one reason or another.
5397 *
5398 * @returns VBox status code (informational status codes included).
5399 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5400 * ring-3.
5401 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5402 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5403 * interrupts)
5404 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5405 * all EMTs to be in ring-3.
5406 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5407 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return
5408 * to the EM loop.
5409 *
5410 * @param pVM Pointer to the VM.
5411 * @param pVCpu Pointer to the VMCPU.
5412 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5413 * out-of-sync. Make sure to update the required fields
5414 * before using them.
5415 */
5416static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5417{
5418 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5419
5420 int rc = VERR_INTERNAL_ERROR_5;
5421 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
5422 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
5423 | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3 | VMCPU_FF_HM_UPDATE_PAE_PDPES))
5424 {
5425 /* We need the control registers now, make sure the guest-CPU context is updated. */
5426 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5427 AssertRCReturn(rc, rc);
5428
5429 /* Pending HM CR3 sync. */
5430 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5431 {
5432 rc = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5433 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
5434 }
5435 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5436 {
5437 rc = PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5438 AssertRC(rc);
5439 }
5440
5441        /* Pending PGM CR3 sync. */
5442 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5443 {
5444 rc = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5445 if (rc != VINF_SUCCESS)
5446 {
5447 AssertRC(rc);
5448 Log(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
5449 return rc;
5450 }
5451 }
5452
5453 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5454 /* -XXX- what was that about single stepping? */
5455 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5456 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5457 {
5458 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5459 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5460 Log(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
5461 return rc;
5462 }
5463
5464 /* Pending VM request packets, such as hardware interrupts. */
5465 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5466 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5467 {
5468 Log(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5469 return VINF_EM_PENDING_REQUEST;
5470 }
5471
5472 /* Pending PGM pool flushes. */
5473 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5474 {
5475 Log(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5476 return VINF_PGM_POOL_FLUSH_PENDING;
5477 }
5478
5479 /* Pending DMA requests. */
5480 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5481 {
5482 Log(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5483 return VINF_EM_RAW_TO_R3;
5484 }
5485 }
5486
5487 /* Paranoia. */
5488 Assert(rc != VERR_EM_INTERPRETER);
5489 return VINF_SUCCESS;
5490}
5491
5492
5493/**
5494 * Converts any TRPM trap into a pending HM event. This is typically used when
5495 * entering from ring-3 (not longjmp returns).
5496 *
5497 * @param pVCpu Pointer to the VMCPU.
5498 */
5499static void hmR0VmxTRPMTrapToPendingEvent(PVMCPU pVCpu)
5500{
5501 Assert(TRPMHasTrap(pVCpu));
5502 Assert(!pVCpu->hm.s.Event.fPending);
5503
5504 uint8_t uVector;
5505 TRPMEVENT enmTrpmEvent;
5506 RTGCUINT uErrCode;
5507 RTGCUINTPTR GCPtrFaultAddress;
5508 uint8_t cbInstr;
5509
5510 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5511 AssertRC(rc);
5512
5513 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
5514 uint32_t u32IntrInfo = uVector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5515 if (enmTrpmEvent == TRPM_TRAP)
5516 {
5517 switch (uVector)
5518 {
5519 case X86_XCPT_BP:
5520 case X86_XCPT_OF:
5521 {
5522 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5523 break;
5524 }
5525
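            /* These hardware exceptions push an error code; mark the VM-entry error code as valid so uErrCode is injected. */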
5526 case X86_XCPT_PF:
5527 case X86_XCPT_DF:
5528 case X86_XCPT_TS:
5529 case X86_XCPT_NP:
5530 case X86_XCPT_SS:
5531 case X86_XCPT_GP:
5532 case X86_XCPT_AC:
5533 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5534 /* no break! */
5535 default:
5536 {
5537 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5538 break;
5539 }
5540 }
5541 }
5542 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5543 {
5544 if (uVector != X86_XCPT_NMI)
5545 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5546 else
5547 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5548 }
5549 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5550 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5551 else
5552 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5553
5554 rc = TRPMResetTrap(pVCpu);
5555 AssertRC(rc);
5556 Log(("Converting TRPM trap: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
5557 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
5558 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
5559}
5560
5561
5562/**
5563 * Converts any pending HM event into a TRPM trap. Typically used when leaving
5564 * VT-x to execute any instruction.
5565 *
5566 * @param pVCpu Pointer to the VMCPU.
5567 */
5568static void hmR0VmxPendingEventToTRPMTrap(PVMCPU pVCpu)
5569{
5570 Assert(pVCpu->hm.s.Event.fPending);
5571
5572 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5573 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
5574 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
5575 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
5576
5577 /* If a trap was already pending, we did something wrong! */
5578 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
5579
5580 TRPMEVENT enmTrapType;
5581 switch (uVectorType)
5582 {
5583 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5584 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5585 enmTrapType = TRPM_HARDWARE_INT;
5586 break;
5587 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5588 enmTrapType = TRPM_SOFTWARE_INT;
5589 break;
5590 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5591 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
5592 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5593 enmTrapType = TRPM_TRAP;
5594 break;
5595 default:
5596 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
5597 enmTrapType = TRPM_32BIT_HACK;
5598 break;
5599 }
5600
5601 Log(("Converting pending HM event to TRPM trap uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
5602 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
5603 AssertRC(rc);
5604
5605 if (fErrorCodeValid)
5606 TRPMSetErrorCode(pVCpu, uErrorCode);
5607 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5608 && uVector == X86_XCPT_PF)
5609 {
5610 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
5611 }
5612 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5613 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5614 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5615 {
5616 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5617 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
5618 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
5619 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
5620 }
5621 pVCpu->hm.s.Event.fPending = false;
5622}
5623
5624
5625/**
5626 * Does the necessary state syncing before doing a longjmp to ring-3.
5627 *
5628 * @param pVM Pointer to the VM.
5629 * @param pVCpu Pointer to the VMCPU.
5630 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5631 * out-of-sync. Make sure to update the required fields
5632 * before using them.
5633 * @param rcExit The reason for exiting to ring-3. Can be
5634 * VINF_VMM_UNKNOWN_RING3_CALL.
5635 *
5636 * @remarks No-long-jmp zone!!!
5637 */
5638static void hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5639{
5640 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
5641 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5642
5643 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
5644 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
5645 AssertRC(rc);
5646
5647    /* Restore FPU state if necessary and resync on next R0 reentry. */
5648 if (CPUMIsGuestFPUStateActive(pVCpu))
5649 {
5650 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
5651 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
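        /* The CR0.TS/MP handling in the VMCS depends on whether the guest FPU state is loaded, so flag CR0 for re-syncing on the next VM-entry. */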
5652 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
5653 }
5654
5655 /* Restore debug registers if necessary and resync on next R0 reentry. */
5656 if (CPUMIsGuestDebugStateActive(pVCpu))
5657 {
5658 CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
5659 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5660 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
5661 }
5662 else if (CPUMIsHyperDebugStateActive(pVCpu))
5663 {
5664 CPUMR0LoadHostDebugState(pVM, pVCpu);
5665 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5666 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
5667 }
5668
5669 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
5670 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
5671 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
5672 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
5673 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5674 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
5675}
5676
5677
5678/**
5679 * An action requires us to go back to ring-3. This function does the necessary
5680 * steps before we can safely return to ring-3. This is not the same as longjmps
5681 * to ring-3; this is a voluntary exit.
5682 *
5683 * @param pVM Pointer to the VM.
5684 * @param pVCpu Pointer to the VMCPU.
5685 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5686 * out-of-sync. Make sure to update the required fields
5687 * before using them.
5688 * @param rcExit The reason for exiting to ring-3. Can be
5689 * VINF_VMM_UNKNOWN_RING3_CALL.
5690 */
5691static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
5692{
5693 Assert(pVM);
5694 Assert(pVCpu);
5695 Assert(pMixedCtx);
5696 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5697
5698 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
5699 {
5700        /* We want to see what the guest-state was before VM-entry; don't resync here as we won't continue guest execution. */
5701 return;
5702 }
5703 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
5704 {
5705 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
5706 pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
5707 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5708 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();
5709 return;
5710 }
5711
5712    /* Please, no longjumps here (a log flush would jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
5713 VMMRZCallRing3Disable(pVCpu);
5714 Log(("hmR0VmxExitToRing3: rcExit=%d\n", rcExit));
5715
5716    /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
5717 if (pVCpu->hm.s.Event.fPending)
5718 {
5719 hmR0VmxPendingEventToTRPMTrap(pVCpu);
5720 Assert(!pVCpu->hm.s.Event.fPending);
5721 }
5722
5723 /* Sync. the guest state. */
5724 hmR0VmxLongJmpToRing3(pVM, pVCpu, pMixedCtx, rcExit);
5725 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
5726
5727 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
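    /* Let ring-3 (e.g. REM) know that the register state we just read back from the VMCS may have changed. */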
5728 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
5729 | CPUM_CHANGED_LDTR
5730 | CPUM_CHANGED_GDTR
5731 | CPUM_CHANGED_IDTR
5732 | CPUM_CHANGED_TR
5733 | CPUM_CHANGED_HIDDEN_SEL_REGS);
5734
5735 /* On our way back from ring-3 the following needs to be done. */
5736 /** @todo This can change with preemption hooks. */
5737 if (rcExit == VINF_EM_RAW_INTERRUPT)
5738 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
5739 else
5740 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
5741
5742 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
5743 VMMRZCallRing3Enable(pVCpu);
5744}
5745
5746
5747/**
5748 * VMMRZCallRing3 callback wrapper which saves the guest state before we
5749 * longjump to ring-3 and possibly get preempted.
5750 *
5751 * @param pVCpu Pointer to the VMCPU.
5752 * @param enmOperation The operation causing the ring-3 longjump.
5753 * @param pvUser The user argument (pointer to the possibly
5754 * out-of-date guest-CPU context).
5755 *
5756 * @remarks Must never be called with @a enmOperation ==
5757 * VMMCALLRING3_VM_R0_ASSERTION.
5758 */
5759DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
5760{
5761 /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
5762 Assert(pVCpu);
5763 Assert(pvUser);
5764 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5765 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5766
5767 VMMRZCallRing3Disable(pVCpu);
5768 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5769 Log(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
5770 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
5771 VMMRZCallRing3Enable(pVCpu);
5772}
5773
5774
5775/**
5776 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
5777 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
5778 *
5780 * @param pVCpu Pointer to the VMCPU.
5781 */
5782DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
5783{
5784 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
5785 {
5786 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
5787 {
5788 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
5789 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
5790 AssertRC(rc);
5791 }
5792 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
5793}
5794
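/*
 * Note: the counterpart of hmR0VmxSetIntWindowExitVmcs() is hmR0VmxExitIntWindow() further below,
 * which clears the interrupt-window exiting control again once the window actually opens, so the
 * pending event can then be injected on the next VM-entry.
 */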
5795
5796/**
5797 * Injects any pending events into the guest if the guest is in a state to
5798 * receive them.
5799 *
5800 * @returns VBox status code (informational status codes included).
5801 * @param pVCpu Pointer to the VMCPU.
5802 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5803 * out-of-sync. Make sure to update the required fields
5804 * before using them.
5805 */
5806static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5807{
5808 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
5809 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
5810 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5811 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
5812
5813 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
5814 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
5815 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
5816 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
5817 Assert(!TRPMHasTrap(pVCpu));
5818
5819 int rc = VINF_SUCCESS;
5820 if (pVCpu->hm.s.Event.fPending) /* First, inject any pending HM events. */
5821 {
5822 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
5823 bool fInject = true;
5824 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
5825 {
5826 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5827 AssertRCReturn(rc, rc);
5828 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
5829 if ( fBlockInt
5830 || fBlockSti
5831 || fBlockMovSS)
5832 {
5833 fInject = false;
5834 }
5835 }
5836 else if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
5837 && ( fBlockMovSS
5838 || fBlockSti))
5839 {
5840 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
5841 fInject = false;
5842 }
5843
5844 if (fInject)
5845 {
5846 Log(("Injecting pending event\n"));
5847 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
5848 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
5849 AssertRCReturn(rc, rc);
5850 pVCpu->hm.s.Event.fPending = false;
5851 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
5852 }
5853 else
5854 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5855 } /** @todo SMI. SMIs take priority over NMIs. */
5856 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
5857 {
5858 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
5859 if ( !fBlockMovSS
5860 && !fBlockSti)
5861 {
5862 Log(("Injecting NMI\n"));
5863 RTGCUINTPTR uIntrInfo;
5864 uIntrInfo = X86_XCPT_NMI | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5865 uIntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5866 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, uIntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
5867 0 /* GCPtrFaultAddress */, &uIntrState);
5868 AssertRCReturn(rc, rc);
5869 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
5870 }
5871 else
5872 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5873 }
5874 else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
5875 {
5876 /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
5877 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5878 AssertRCReturn(rc, rc);
5879 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
5880 if ( !fBlockInt
5881 && !fBlockSti
5882 && !fBlockMovSS)
5883 {
5884 uint8_t u8Interrupt;
5885 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
5886 if (RT_SUCCESS(rc))
5887 {
5888 Log(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
5889 uint32_t u32IntrInfo = u8Interrupt | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5890 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5891 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */,
5892 0 /* GCPtrFaultAddress */, &uIntrState);
5893 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
5894 }
5895 else
5896 {
5897 /** @todo Does this actually happen? If not turn it into an assertion. */
5898 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
5899 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
5900 rc = VINF_SUCCESS;
5901 }
5902 }
5903 else
5904 hmR0VmxSetIntWindowExitVmcs(pVCpu);
5905 }
5906
5907 /*
5908 * Deliver a pending debug exception if the guest is single-stepping. The interruptibility-state could have been changed by
5909 * hmR0VmxInjectEventVmcs() (e.g. real-on-v86 injecting software interrupts); re-evaluate it and set the BS bit if needed.
5910 */
5911 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5912 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
5913 int rc2 = VINF_SUCCESS;
5914 if ( fBlockSti
5915 || fBlockMovSS)
5916 {
5917 if (!DBGFIsStepping(pVCpu))
5918 {
5919 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
5920 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
5921 {
5922 /*
5923 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD, VMX_EXIT_MTF,
5924 * VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI. See Intel spec. 27.3.4 "Saving Non-Register State".
5925 */
5926 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
5927 }
5928 }
5929 else
5930 {
5931 /* We are single-stepping in the hypervisor debugger; clear interrupt inhibition, as setting the BS bit would mean
5932 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
5933 uIntrState = 0;
5934 }
5935 }
5936
5937 /*
5938 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
5939 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
5940 */
5941 rc2 |= hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
5942 AssertRC(rc2);
5943
5944 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
5945 return rc;
5946}
5947
5948
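/*
 * For reference (a summary of Intel spec. 24.8.3 "VM-Entry Controls for Event Injection"), the
 * interruption-information format built by the helpers below is:
 *      bits  7:0   - vector
 *      bits 10:8   - type (external interrupt, NMI, hardware/software exception, software interrupt, ...)
 *      bit  11     - deliver-error-code
 *      bits 30:12  - reserved (MBZ)
 *      bit  31     - valid
 * e.g. a hardware #GP with an error code is encoded (see hmR0VmxInjectXcptGP()) as:
 *        X86_XCPT_GP
 *      | (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT)
 *      | VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID
 *      | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT)
 */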
5949/**
5950 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
5951 *
5952 * @param pVCpu Pointer to the VMCPU.
5953 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5954 * out-of-sync. Make sure to update the required fields
5955 * before using them.
5956 */
5957DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5958{
5959 uint32_t u32IntrInfo = X86_XCPT_UD | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5960 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5961}
5962
5963
5964/**
5965 * Injects a double-fault (#DF) exception into the VM.
5966 *
5967 * @returns VBox status code (informational status code included).
5968 * @param pVCpu Pointer to the VMCPU.
5969 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5970 * out-of-sync. Make sure to update the required fields
5971 * before using them.
5972 */
5973DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
5974{
5975 uint32_t u32IntrInfo = X86_XCPT_DF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5976 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5977 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5978 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
5979 puIntrState);
5980}
5981
5982
5983/**
5984 * Sets a debug (#DB) exception as pending-for-injection into the VM.
5985 *
5986 * @param pVCpu Pointer to the VMCPU.
5987 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5988 * out-of-sync. Make sure to update the required fields
5989 * before using them.
5990 */
5991DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5992{
5993 uint32_t u32IntrInfo = X86_XCPT_DB | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
5994 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5995 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5996}
5997
5998
5999/**
6000 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6001 *
6002 * @param pVCpu Pointer to the VMCPU.
6003 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6004 * out-of-sync. Make sure to update the required fields
6005 * before using them.
6006 * @param cbInstr The instruction length in bytes; used to compute the
6007 * return RIP that is pushed on the guest stack.
6008 */
6009DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6010{
6011 uint32_t u32IntrInfo = X86_XCPT_OF | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
6012 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6013 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6014}
6015
6016
6017/**
6018 * Injects a general-protection (#GP) fault into the VM.
6019 *
6020 * @returns VBox status code (informational status code included).
6021 * @param pVCpu Pointer to the VMCPU.
6022 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6023 * out-of-sync. Make sure to update the required fields
6024 * before using them.
 * @param fErrorCodeValid Whether the error code is valid for this #GP.
6025 * @param u32ErrorCode The error code associated with the #GP.
 * @param puIntrState Pointer to the current guest interruptibility-state.
6026 */
6027DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6028 uint32_t *puIntrState)
6029{
6030 uint32_t u32IntrInfo = X86_XCPT_GP | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
6031 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6032 if (fErrorCodeValid)
6033 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6034 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6035 puIntrState);
6036}
6037
6038
6039/**
6040 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6041 *
6042 * @param pVCpu Pointer to the VMCPU.
6043 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6044 * out-of-sync. Make sure to update the required fields
6045 * before using them.
6046 * @param uVector The software interrupt vector number.
6047 * @param cbInstr The instruction length in bytes; used to compute the
6048 * return RIP that is pushed on the guest stack.
6049 */
6050DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6051{
6052 uint32_t u32IntrInfo = uVector | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
6053 if ( uVector == X86_XCPT_BP
6054 || uVector == X86_XCPT_OF)
6055 {
6056 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6057 }
6058 else
6059 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6060 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6061}
6062
6063
6064/**
6065 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6066 * stack.
6067 *
6068 * @returns VBox status code (informational status codes included).
6069 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6070 * @param pVM Pointer to the VM.
6071 * @param pMixedCtx Pointer to the guest-CPU context.
6072 * @param uValue The value to push to the guest stack.
6073 */
6074DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6075{
6076 /*
6077 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6078 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6079 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6080 */
6081 if (pMixedCtx->sp == 1)
6082 return VINF_EM_RESET;
6083 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6084 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6085 AssertRCReturn(rc, rc);
6086 return rc;
6087}
6088
6089
6090/**
6091 * Injects an event into the guest upon VM-entry by updating the relevant fields
6092 * in the VM-entry area in the VMCS.
6093 *
6094 * @returns VBox status code (informational error codes included).
6095 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6096 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6097 *
6098 * @param pVCpu Pointer to the VMCPU.
6099 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6100 * be out-of-sync. Make sure to update the required
6101 * fields before using them.
6102 * @param u64IntrInfo The VM-entry interruption-information field.
6103 * @param cbInstr The VM-entry instruction length in bytes (for
6104 * software interrupts, exceptions and privileged
6105 * software exceptions).
6106 * @param u32ErrCode The VM-entry exception error code.
6107 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6108 * @param puIntrState Pointer to the current guest interruptibility-state.
6109 * This interruptibility-state will be updated if
6110 * necessary. This cannot be NULL.
6111 *
6112 * @remarks No-long-jump zone!!!
6113 * @remarks Requires CR0!
6114 */
6115static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6116 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6117{
6118 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6119 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6120 Assert(puIntrState);
6121 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6122
6123 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6124 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6125
6126 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6127 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6128 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6129
6130 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6131
6132 /* We require CR0 to check if the guest is in real-mode. */
6133 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6134 AssertRCReturn(rc, rc);
6135
6136 /*
6137 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6138 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6139 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6140 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6141 */
6142 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6143 {
6144 PVM pVM = pVCpu->CTX_SUFF(pVM);
6145 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6146 {
6147 Assert(PDMVmmDevHeapIsEnabled(pVM));
6148 Assert(pVM->hm.s.vmx.pRealModeTSS);
6149
6150 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6151 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6152 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6153 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6154 AssertRCReturn(rc, rc);
6155 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6156
6157 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6158 const size_t cbIdtEntry = 4;
6159 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6160 {
6161 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6162 if (uVector == X86_XCPT_DF)
6163 return VINF_EM_RESET;
6164 else if (uVector == X86_XCPT_GP)
6165 {
6166 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6167 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6168 }
6169
6170 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6171 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6172 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6173 }
6174
6175 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6176 uint16_t uGuestIp = pMixedCtx->ip;
6177 if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6178 {
6179 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6180 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
6181 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6182 }
6183 else if (VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6184 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6185
6186 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
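            /* Note: each real-mode IVT entry is 4 bytes: a 16-bit handler offset (IP) followed by a 16-bit segment (CS). */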
6187 uint16_t offIdtEntry = 0;
6188 RTSEL selIdtEntry = 0;
6189 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6190 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6191 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6192 AssertRCReturn(rc, rc);
6193
6194 /* Construct the stack frame for the interrupt/exception handler. */
6195 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6196 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6197 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6198 AssertRCReturn(rc, rc);
6199
6200 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6201 if (rc == VINF_SUCCESS)
6202 {
6203 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6204 pMixedCtx->rip = offIdtEntry;
6205 pMixedCtx->cs.Sel = selIdtEntry;
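                /* Real-mode segment base = selector << 4; cbIdtEntry happens to equal 4, which is what makes the shift below correct. */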
6206 pMixedCtx->cs.u64Base = selIdtEntry << cbIdtEntry;
6207 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6208 && uVector == X86_XCPT_PF)
6209 {
6210 pMixedCtx->cr2 = GCPtrFaultAddress;
6211 }
6212 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6213 | HM_CHANGED_GUEST_RIP
6214 | HM_CHANGED_GUEST_RFLAGS
6215 | HM_CHANGED_GUEST_RSP;
6216
6217 /* We're clearing EFLAGS.IF above, so there can be no block-by-STI interrupt-inhibition. */
6218 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6219 {
6220 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6221 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6222 Log(("Clearing inhibition due to STI.\n"));
6223 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6224 }
6225 Log(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6226 }
6227 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6228 return rc;
6229 }
6230 else
6231 {
6232 /*
6233 * For CPUs with unrestricted guest execution enabled that are running real-mode guests, we must not set the deliver-error-code bit.
6234 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6235 */
6236 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6237 }
6238 }
6239
6240 /* Validate. */
6241 Assert(VMX_EXIT_INTERRUPTION_INFO_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6242 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6243 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6244
6245 /* Inject. */
6246 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6247 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6248 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6249 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6250
6251 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6252 && uVector == X86_XCPT_PF)
6253 {
6254 pMixedCtx->cr2 = GCPtrFaultAddress;
6255 }
6256 Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
6257
6258 AssertRCReturn(rc, rc);
6259 return rc;
6260}
6261
6262
6263/**
6264 * Enters the VT-x session.
6265 *
6266 * @returns VBox status code.
6267 * @param pVM Pointer to the VM.
6268 * @param pVCpu Pointer to the VMCPU.
6269 * @param pCpu Pointer to the CPU info struct.
6270 */
6271VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
6272{
6273 AssertPtr(pVM);
6274 AssertPtr(pVCpu);
6275 Assert(pVM->hm.s.vmx.fSupported);
6276 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6277 NOREF(pCpu);
6278
6279 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6280
6281 /* Make sure we're in VMX root mode. */
6282 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6283 if (!(u32HostCR4 & X86_CR4_VMXE))
6284 {
6285 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6286 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6287 }
6288
6289 /* Load the active VMCS as the current one. */
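    /* (VMXActivateVMCS executes VMPTRLD, making this VMCS the current-VMCS pointer on this CPU.) */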
6290 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6291 if (RT_FAILURE(rc))
6292 return rc;
6293
6294 /** @todo This will change with preemption hooks where we can VMRESUME as long
6295 * as we're not preempted. */
6296 pVCpu->hm.s.fResumeVM = false;
6297 return VINF_SUCCESS;
6298}
6299
6300
6301/**
6302 * Leaves the VT-x session.
6303 *
6304 * @returns VBox status code.
6305 * @param pVM Pointer to the VM.
6306 * @param pVCpu Pointer to the VMCPU.
6307 * @param pCtx Pointer to the guest-CPU context.
6308 */
6309VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6310{
6311 AssertPtr(pVCpu);
6312 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6313 NOREF(pVM);
6314 NOREF(pCtx);
6315
6316 /** @todo this will change with preemption hooks where we only VMCLEAR when
6317 * we are actually going to be preempted, not all the time like we
6318 * currently do. */
6319 /*
6320 * Sync the current VMCS (writes back internal data back into the VMCS region in memory)
6321 * and mark the VMCS launch-state as "clear".
6322 */
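    /* Note: after VMCLEAR the next VM-entry with this VMCS must use VMLAUNCH again; VMXR0Enter() resets fResumeVM to reflect this. */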
6323 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);
6324 return rc;
6325}
6326
6327
6328/**
6329 * Saves the host state in the VMCS host-state.
6330 * Sets up the VM-exit MSR-load area.
6331 *
6332 * The CPU state will be loaded from these fields on every successful VM-exit.
6333 *
6334 * @returns VBox status code.
6335 * @param pVM Pointer to the VM.
6336 * @param pVCpu Pointer to the VMCPU.
6337 *
6338 * @remarks No-long-jump zone!!!
6339 */
6340VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
6341{
6342 AssertPtr(pVM);
6343 AssertPtr(pVCpu);
6344 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6345
6346 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6347
6348 /* Nothing to do if the host-state-changed flag isn't set. This will later be optimized when preemption hooks are in place. */
6349 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
6350 return VINF_SUCCESS;
6351
6352 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
6353 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6354
6355 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
6356 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6357
6358 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
6359 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6360
6361 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
6362 return rc;
6363}
6364
6365
6366/**
6367 * Loads the guest state into the VMCS guest-state area. The CPU state will be
6368 * loaded from these fields on every successful VM-entry.
6369 *
6370 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
6371 * Sets up the VM-entry controls.
6372 * Sets up the appropriate VMX non-root function to execute guest code based on
6373 * the guest CPU mode.
6374 *
6375 * @returns VBox status code.
6376 * @param pVM Pointer to the VM.
6377 * @param pVCpu Pointer to the VMCPU.
6378 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6379 * out-of-sync. Make sure to update the required fields
6380 * before using them.
6381 *
6382 * @remarks No-long-jump zone!!!
6383 */
6384VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6385{
6386 AssertPtr(pVM);
6387 AssertPtr(pVCpu);
6388 AssertPtr(pMixedCtx);
6389 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6390
6391 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6392
6393 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
6394
6395 /* Determine real-on-v86 mode. */
6396 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
6397 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
6398 && CPUMIsGuestInRealModeEx(pMixedCtx))
6399 {
6400 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
6401 }
6402
6403 /*
6404 * Load the guest-state into the VMCS.
6405 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
6406 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
6407 */
6408 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
6409 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6410
6411 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
6412 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6413
6414 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
6415 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6416
6417 rc = hmR0VmxLoadGuestControlRegs(pVCpu, pMixedCtx);
6418 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestControlRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6419
6420 /* Must be done after CR0 is loaded (strict builds require CR0 for segment register validation checks). */
6421 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
6422 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6423
6424 rc = hmR0VmxLoadGuestDebugRegs(pVCpu, pMixedCtx);
6425 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestDebugRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6426
6427 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
6428 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6429
6430 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
6431 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6432
6433 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
6434 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestGprs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6435
6436 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
6437 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
6438
6439 AssertMsg(!pVCpu->hm.s.fContextUseFlags,
6440 ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
6441 pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
6442
6443 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
6444 return rc;
6445}
6446
6447
6448/**
6449 * Does the preparations before executing guest code in VT-x.
6450 *
6451 * This may cause longjmps to ring-3 and may even result in rescheduling to the
6452 * recompiler. We must be cautious about what we do here regarding committing
6453 * guest-state information into the VMCS on the assumption that we will actually
6454 * execute the guest in VT-x. If we fall back to the recompiler after updating the VMCS and
6455 * clearing the common-state (TRPM/forceflags), we must undo those changes so
6456 * that the recompiler can (and should) use them when it resumes guest
6457 * execution. Otherwise such operations must be done when we can no longer
6458 * exit to ring-3.
6459 *
6460 * @returns VBox status code (informational status codes included).
6461 * @retval VINF_SUCCESS if we can proceed with running the guest.
6462 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
6463 * into the guest.
6464 * @retval VINF_* scheduling changes, we have to go back to ring-3.
6465 *
6466 * @param pVM Pointer to the VM.
6467 * @param pVCpu Pointer to the VMCPU.
6468 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6469 * out-of-sync. Make sure to update the required fields
6470 * before using them.
6471 * @param pVmxTransient Pointer to the VMX transient structure.
6472 *
6473 * @remarks Called with preemption disabled.
6474 */
6475DECLINLINE(int) hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6476{
6477 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6478
6479#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
6480 PGMRZDynMapFlushAutoSet(pVCpu);
6481#endif
6482
6483 /* Check force flag actions that might require us to go back to ring-3. */
6484 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
6485 if (rc != VINF_SUCCESS)
6486 return rc;
6487
6488 /* Set up virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date; it's not part of the VMCS. */
6489 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
6490 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
6491 {
6492 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
6493 RTGCPHYS GCPhysApicBase;
6494 GCPhysApicBase = pMixedCtx->msrApicBase;
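        /* The APIC base MSR also carries flag bits (enable, BSP, etc.); keep only the 4K-aligned physical base address. */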
6495 GCPhysApicBase &= PAGE_BASE_GC_MASK;
6496
6497 /* Unalias any existing mapping. */
6498 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
6499 AssertRCReturn(rc, rc);
6500
6501 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
6502 Log(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
6503 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
6504 AssertRCReturn(rc, rc);
6505
6506 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
6507 }
6508
6509#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6510 /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
6511 pVmxTransient->uEFlags = ASMIntDisableFlags();
6512 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
6513 {
6514 ASMSetFlags(pVmxTransient->uEFlags);
6515 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
6516 /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
6517 return VINF_EM_RAW_INTERRUPT;
6518 }
6519 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6520 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6521#endif
6522
6523 /*
6524 * Evaluates and injects any pending events, toggling force-flags and updating the guest-interruptibility
6525 * state (interrupt shadow) in the VMCS. This -can- potentially be reworked to be done before disabling
6526 * interrupts and handle returning to ring-3 afterwards, but requires very careful state restoration.
6527 */
6528 /** @todo Rework event evaluation and injection to be completely separate. */
6529 if (TRPMHasTrap(pVCpu))
6530 hmR0VmxTRPMTrapToPendingEvent(pVCpu);
6531
6532 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
6533 AssertRCReturn(rc, rc);
6534 return rc;
6535}
6536
6537
6538/**
6539 * Prepares to run guest code in VT-x and we've committed to doing so. This
6540 * means there is no backing out to ring-3 or anywhere else at this
6541 * point.
6542 *
6543 * @param pVM Pointer to the VM.
6544 * @param pVCpu Pointer to the VMCPU.
6545 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6546 * out-of-sync. Make sure to update the required fields
6547 * before using them.
6548 * @param pVmxTransient Pointer to the VMX transient structure.
6549 *
6550 * @remarks Called with preemption disabled.
6551 * @remarks No-long-jump zone!!!
6552 */
6553DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6554{
6555 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6556 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6557
6558#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6559 /** @todo I don't see the point of this; VMMR0EntryFast() already disables interrupts for the entire period. */
6560 pVmxTransient->uEFlags = ASMIntDisableFlags();
6561 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
6562#endif
6563
6564 /* Load the required guest state bits (for guest-state changes in the inner execution loop). */
6565 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
6566 Log4(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
6567#ifdef HMVMX_SYNC_FULL_GUEST_STATE
6568 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
6569#endif
6570 int rc = VINF_SUCCESS;
6571 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
6572 {
6573 rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
6574 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
6575 }
6576 else if (pVCpu->hm.s.fContextUseFlags)
6577 {
6578 rc = VMXR0LoadGuestState(pVM, pVCpu, pMixedCtx);
6579 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
6580 }
6581 AssertRC(rc);
6582 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
6583
6584 /* Cache the TPR-shadow for checking on every VM-exit if it might have changed. */
6585 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
6586 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
6587
6588 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
6589 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
6590 {
6591 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
6592 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
6593 }
6594
6595 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
6596 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
6597 Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
6598
6599 /*
6600 * TPR patching (only active for 32-bit guests on 64-bit capable CPUs) when the CPU does not support the
6601 * virtualize-APIC-accesses feature (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).
6602 */
6603 if (pVM->hm.s.fTPRPatchingActive)
6604 {
6605 Assert(!CPUMIsGuestInLongMode(pVCpu));
6606
6607 /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
6608 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6609 AssertRC(rc);
6610
6611 /* The patch code uses LSTAR as it's not used by a guest running in 32-bit mode (i.e. SYSCALL is 64-bit only). */
6612 pVmxTransient->u64LStarMsr = ASMRdMsr(MSR_K8_LSTAR);
6613 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR); /* pMixedCtx->msrLSTAR contains the guest's TPR,
6614 see hmR0VmxLoadGuestApicState(). */
6615 }
6616
6617#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6618 /*
6619 * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
6620 * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
6621 */
6622 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6623 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
6624 {
6625 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
6626 uint64_t u64HostTscAux = 0;
6627 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
6628 AssertRC(rc2);
6629 ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
6630 }
6631#endif
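    /* (The host's TSC_AUX is restored in hmR0VmxPostRunGuest() once we're back from executing the guest.) */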
6632
6633 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
6634 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
6635 to start executing. */
6636}
6637
6638
6639/**
6640 * Performs some essential restoration of state after running guest code in
6641 * VT-x.
6642 *
6643 * @param pVM Pointer to the VM.
6644 * @param pVCpu Pointer to the VMCPU.
6645 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6646 * out-of-sync. Make sure to update the required fields
6647 * before using them.
6648 * @param pVmxTransient Pointer to the VMX transient structure.
6649 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
6650 *
6651 * @remarks Called with interrupts disabled.
6652 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
6653 * unconditionally when it is safe to do so.
6654 */
6655DECLINLINE(void) hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
6656{
6657 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6658 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
6659
6660 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
6661 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
6662 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 require saving the guest state. */
6663 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
6664 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
6665
6666 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
6667 {
6668#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
6669 /* Restore host's TSC_AUX. */
6670 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
6671 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
6672#endif
6673 /** @todo Find a way to fix hardcoding a guestimate. */
6674 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
6675 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
6676 }
6677
6678 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
6679 Assert(!(ASMGetFlags() & X86_EFL_IF));
6680 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
6681
6682 /* Restore the effects of TPR patching if any. */
6683 if (pVM->hm.s.fTPRPatchingActive)
6684 {
6685 int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6686 AssertRC(rc);
6687 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); /* MSR_K8_LSTAR contains the guest TPR. */
6688 ASMWrMsr(MSR_K8_LSTAR, pVmxTransient->u64LStarMsr);
6689 }
6690
6691 ASMSetFlags(pVmxTransient->uEFlags); /* Enable interrupts. */
6692 pVCpu->hm.s.fResumeVM = true; /* Use VMRESUME instead of VMLAUNCH in the next run. */
6693
6694 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
6695 uint32_t uExitReason;
6696 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
6697 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
6698 AssertRC(rc);
6699 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
6700 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
6701
6702 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
6703 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
6704
6705 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
6706 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
6707 {
6708 Log(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
6709 return;
6710 }
6711
6712 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
6713 {
6714 /* Update the guest interruptibility-state from the VMCS. */
6715 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
6716#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
6717 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6718 AssertRC(rc);
6719#endif
6720 /*
6721 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
6722 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
6723 * also why we do it outside of hmR0VmxSaveGuestState() which must never cause longjmps.
6724 */
6725 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
6726 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
6727 {
6728 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
6729 AssertRC(rc);
6730 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
6731 }
6732 }
6733}
6734
6735
6736/**
6737 * Runs the guest code using VT-x.
6738 *
6739 * @returns VBox status code.
6740 * @param pVM Pointer to the VM.
6741 * @param pVCpu Pointer to the VMCPU.
6742 * @param pCtx Pointer to the guest-CPU context.
6743 *
6744 * @remarks Called with preemption disabled.
6745 */
6746VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
6747{
6748 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6749 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6750
6751 VMXTRANSIENT VmxTransient;
6752 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
6753 int rc = VERR_INTERNAL_ERROR_5;
6754 uint32_t cLoops = 0;
6755
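    /*
     * The inner execution loop, roughly: hmR0VmxPreRunGuest() (may return to ring-3), disable
     * longjmps, hmR0VmxPreRunGuestCommitted(), hmR0VmxRunGuest(), hmR0VmxPostRunGuest() (restores
     * host state bits and re-enables longjmps), then dispatch the VM-exit handler. Repeat until a
     * handler returns something other than VINF_SUCCESS or cMaxResumeLoops is exceeded.
     */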
6756 for (;; cLoops++)
6757 {
6758 Assert(!HMR0SuspendPending());
6759 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
6760 ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
6761 (unsigned)RTMpCpuId(), cLoops));
6762
6763 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
6764 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
6765 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
6766 if (rc != VINF_SUCCESS)
6767 break;
6768
6769 /*
6770 * No longjmps to ring-3 from this point on!!!
6771 * Assertions will still longjmp to ring-3 (and won't return), which is intentional; it's better than a kernel panic.
6772 * This also disables flushing of the R0-logger instance (if any).
6773 */
6774 VMMRZCallRing3Disable(pVCpu);
6775 VMMRZCallRing3RemoveNotification(pVCpu);
6776 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
6777
6778 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
6779 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
6780
6781 /*
6782 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
6783 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
6784 */
6785 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
6786 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
6787 {
6788 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
6789 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
6790 return rc;
6791 }
6792
6793 /* Handle the VM-exit. */
6794 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
6795 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
6796 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
6797 HMVMX_START_EXIT_DISPATCH_PROF();
6798#ifdef HMVMX_USE_FUNCTION_TABLE
6799 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
6800#else
6801 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
6802#endif
6803 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
6804 if (rc != VINF_SUCCESS)
6805 break;
6806 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
6807 {
6808 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
6809 rc = VINF_EM_RAW_INTERRUPT;
6810 break;
6811 }
6812 }
6813
6814 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
6815 if (rc == VERR_EM_INTERPRETER)
6816 rc = VINF_EM_RAW_EMULATE_INSTR;
6817 else if (rc == VINF_EM_RESET)
6818 rc = VINF_EM_TRIPLE_FAULT;
6819 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
6820 return rc;
6821}
6822
6823
6824#ifndef HMVMX_USE_FUNCTION_TABLE
6825DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
6826{
6827 int rc;
6828 switch (rcReason)
6829 {
6830 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
6831 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
6832 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
6833 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
6834 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
6835 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
6836 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6837 case VMX_EXIT_XCPT_NMI: rc = hmR0VmxExitXcptNmi(pVCpu, pMixedCtx, pVmxTransient); break;
6838 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
6839 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
6840 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
6841 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
6842 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
6843 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
6844 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
6845 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
6846 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
6847 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
6848 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
6849 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
6850 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
6851 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
6852 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
6853 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
6854 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
6855 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6856 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
6857 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
6858 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
6859 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
6860 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
6861 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
6862 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
6863
6864 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
6865 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
6866 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
6867 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
6868 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
6869 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
6870 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
6871 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
6872 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
6873
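        /* We don't expose VMX to the guest, so all of the VMX instruction VM-exits below get a #UD injected. */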
6874 case VMX_EXIT_VMCALL:
6875 case VMX_EXIT_VMCLEAR:
6876 case VMX_EXIT_VMLAUNCH:
6877 case VMX_EXIT_VMPTRLD:
6878 case VMX_EXIT_VMPTRST:
6879 case VMX_EXIT_VMREAD:
6880 case VMX_EXIT_VMRESUME:
6881 case VMX_EXIT_VMWRITE:
6882 case VMX_EXIT_VMXOFF:
6883 case VMX_EXIT_VMXON:
6884 case VMX_EXIT_INVEPT:
6885 case VMX_EXIT_INVVPID:
6886 case VMX_EXIT_VMFUNC:
6887 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
6888 break;
6889 default:
6890 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
6891 break;
6892 }
6893 return rc;
6894}
6895#endif
6896
6897#ifdef DEBUG
6898/* Is there some generic IPRT define for this that is not in Runtime/internal/\*? */
6899# define VMX_ASSERT_PREEMPT_CPUID_VAR() \
6900 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
6901# define VMX_ASSERT_PREEMPT_CPUID() \
6902 do \
6903 { \
6904 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
6905 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
6906 } while (0)
6907
6908# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
6909 do { \
6910 AssertPtr(pVCpu); \
6911 AssertPtr(pMixedCtx); \
6912 AssertPtr(pVmxTransient); \
6913 Assert(pVmxTransient->fVMEntryFailed == false); \
6914 Assert(ASMIntAreEnabled()); \
6915 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
6916 VMX_ASSERT_PREEMPT_CPUID_VAR(); \
6917 LogFunc(("vcpu[%u] vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n", \
6918 (unsigned)pVCpu->idCpu)); \
6919 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
6920 if (VMMR0IsLogFlushDisabled(pVCpu)) \
6921 VMX_ASSERT_PREEMPT_CPUID(); \
6922 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
6923 } while (0)
6924# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
6925 do { \
6926 LogFunc(("\n")); \
6927 } while(0)
6928#else /* Release builds */
6929# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
6930# define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
6931#endif
6932
6933
6934/**
6935 * Advances the guest RIP after reading it from the VMCS.
6936 *
6937 * @returns VBox status code.
6938 * @param pVCpu Pointer to the VMCPU.
6939 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6940 * out-of-sync. Make sure to update the required fields
6941 * before using them.
6942 * @param pVmxTransient Pointer to the VMX transient structure.
6943 *
6944 * @remarks No-long-jump zone!!!
6945 */
6946DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6947{
6948 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
6949 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6950 AssertRCReturn(rc, rc);
6951
6952 pMixedCtx->rip += pVmxTransient->cbInstr;
6953 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
6954 return rc;
6955}
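/* (Typically used by instruction-induced VM-exit handlers, e.g. CPUID/RDTSC/I/O, to skip past the emulated instruction.) */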
6956
6957
6958/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6959/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
6960/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
6961/**
6962 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
6963 */
6964HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6965{
6966 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6967 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
6968#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
6969 Assert(ASMIntAreEnabled());
6970 return VINF_SUCCESS;
6971#else
6972 return VINF_EM_RAW_INTERRUPT;
6973#endif
6974}
6975
6976
6977/**
6978 * VM-exit handler for exceptions and NMIs (VMX_EXIT_XCPT_NMI).
6979 */
6980HMVMX_EXIT_DECL hmR0VmxExitXcptNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
6981{
6982 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
6983 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
6984
6985 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
6986 AssertRCReturn(rc, rc);
6987
6988 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
6989 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXT_INT)
6990 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6991
6992 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
6993 {
6994 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
6995 return VINF_EM_RAW_INTERRUPT;
6996 }
6997
6998 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
6999 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
7000 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
7001 {
7002 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7003 return VINF_SUCCESS;
7004 }
7005 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
7006 {
7007 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7008 return rc;
7009 }
7010
7011 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
7012 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
7013 switch (uIntrType)
7014 {
7015 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
7016 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7017 /* no break */
7018 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
7019 {
7020 switch (uVector)
7021 {
7022 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
7023 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
7024 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
7025 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
7026 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
7027 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
7028#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
7029 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
7030 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7031 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
7032 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7033 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
7034 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7035 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
7036 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7037 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
7038 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
7039#endif
7040 default:
7041 {
7042 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7043 AssertRCReturn(rc, rc);
7044
7045 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
7046 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7047 {
7048 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
7049 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
7050 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7051 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
7052 AssertRCReturn(rc, rc);
7053 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
7054 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
7055 0 /* GCPtrFaultAddress */);
7056 AssertRCReturn(rc, rc);
7057 }
7058 else
7059 {
7060 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
7061 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
7062 }
7063 break;
7064 }
7065 }
7066 break;
7067 }
7068
7069 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
7070 default:
7071 {
7072 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
7073 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
7074 break;
7075 }
7076 }
7077 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
7078 return rc;
7079}
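
/*
 * Illustrative sketch, not part of the original code: how the VM-exit interruption-information
 * field dispatched on above is laid out. The helper below is hypothetical and only spells out what
 * the VMX_EXIT_INTERRUPTION_INFO_* macros extract; see the Intel spec. for the authoritative format.
 */
#if 0
static void hmR0VmxSketchDecodeExitIntrInfo(uint32_t uExitIntrInfo)
{
    uint32_t const uVector       = uExitIntrInfo & 0xff;                /* Bits 7:0  - vector (e.g. X86_XCPT_PF). */
    uint32_t const uType         = (uExitIntrInfo >> 8) & 0x7;          /* Bits 10:8 - type (NMI, HW xcpt, SW xcpt, ...). */
    bool const     fErrCodeValid = RT_BOOL(uExitIntrInfo & RT_BIT(11)); /* Bit 11    - error code valid. */
    bool const     fValid        = RT_BOOL(uExitIntrInfo & RT_BIT(31)); /* Bit 31    - the whole field is valid. */
    NOREF(uVector); NOREF(uType); NOREF(fErrCodeValid); NOREF(fValid);
}
#endif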
7080
7081
7082/**
7083 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
7084 */
7085HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7086{
7087 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7088
7089 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
7090 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT);
7091 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
7092 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
7093 AssertRCReturn(rc, rc);
7094
7095 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
7096 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
7097 return VINF_SUCCESS;
7098}
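
/*
 * Illustrative sketch, not part of the original code: the "drop an intercept" pattern used by the
 * interrupt-window handler above (and again by the monitor-trap-flag and MOV DRx handlers below) -
 * clear the bit in the cached processor-based execution controls and write the word back to the
 * VMCS. The helper name is hypothetical.
 */
#if 0
static int hmR0VmxSketchClearProcCtl(PVMCPU pVCpu, uint32_t fCtl)
{
    pVCpu->hm.s.vmx.u32ProcCtls &= ~fCtl;                       /* Update the cached copy... */
    return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS,   /* ...and make the VMCS match it. */
                          pVCpu->hm.s.vmx.u32ProcCtls);
}
#endif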
7099
7100
7101/**
7102 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
7103 */
7104HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7105{
7106 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7107 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
7108 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7109}
7110
7111
7112/**
7113 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
7114 */
7115HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7116{
7117 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7118 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
7119 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7120}
7121
7122
7123/**
7124 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
7125 */
7126HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7127{
7128 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7129 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
7130 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7131}
7132
7133
7134/**
7135 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
7136 */
7137HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7138{
7139 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7140 PVM pVM = pVCpu->CTX_SUFF(pVM);
7141 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7142 if (RT_LIKELY(rc == VINF_SUCCESS))
7143 {
7144 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7145 Assert(pVmxTransient->cbInstr == 2);
7146 }
7147 else
7148 {
7149 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
7150 rc = VERR_EM_INTERPRETER;
7151 }
7152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
7153 return rc;
7154}
7155
7156
7157/**
7158 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
7159 */
7160HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7161{
7162 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7163 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
7164 AssertRCReturn(rc, rc);
7165
7166 if (pMixedCtx->cr4 & X86_CR4_SMXE)
7167 return VINF_EM_RAW_EMULATE_INSTR;
7168
7169 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
7170 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7171}
7172
7173
7174/**
7175 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
7176 */
7177HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7178{
7179 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7180 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7181 AssertRCReturn(rc, rc);
7182
7183 PVM pVM = pVCpu->CTX_SUFF(pVM);
7184 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7185 if (RT_LIKELY(rc == VINF_SUCCESS))
7186 {
7187 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7188 Assert(pVmxTransient->cbInstr == 2);
7189 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7190 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
7191 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7192 }
7193 else
7194 {
7195 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
7196 rc = VERR_EM_INTERPRETER;
7197 }
7198 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7199 return rc;
7200}
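
/*
 * Illustrative sketch, not part of the original code: what the TSC offsetting mentioned above
 * means. With VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING active and RDTSC exiting clear,
 * the CPU itself hands the guest the host TSC plus the VMCS TSC-offset field; the helper below is
 * hypothetical and only spells out that relationship.
 */
#if 0
static uint64_t hmR0VmxSketchGuestTsc(uint64_t u64TscOffset)
{
    /* The TSC value the guest observes when offsetting is active and the read doesn't cause a VM-exit. */
    return ASMReadTSC() + u64TscOffset;
}
#endif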
7201
7202
7203/**
7204 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
7205 */
7206HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7207{
7208 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7209 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7210 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
7211 AssertRCReturn(rc, rc);
7212
7213 PVM pVM = pVCpu->CTX_SUFF(pVM);
7214 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
7215 if (RT_LIKELY(rc == VINF_SUCCESS))
7216 {
7217 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7218 Assert(pVmxTransient->cbInstr == 3);
7219 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
7220 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
7221 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7222 }
7223 else
7224 {
7225 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
7226 rc = VERR_EM_INTERPRETER;
7227 }
7228 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
7229 return rc;
7230}
7231
7232
7233/**
7234 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
7235 */
7236HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7237{
7238 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7239 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
7240 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
7241 AssertRCReturn(rc, rc);
7242
7243 PVM pVM = pVCpu->CTX_SUFF(pVM);
7244 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7245 if (RT_LIKELY(rc == VINF_SUCCESS))
7246 {
7247 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7248 Assert(pVmxTransient->cbInstr == 2);
7249 }
7250 else
7251 {
7252 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
7253 rc = VERR_EM_INTERPRETER;
7254 }
7255 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
7256 return rc;
7257}
7258
7259
7260/**
7261 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
7262 */
7263HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7264{
7265 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7266 PVM pVM = pVCpu->CTX_SUFF(pVM);
7267 Assert(!pVM->hm.s.fNestedPaging);
7268
7269 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7270 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7271 AssertRCReturn(rc, rc);
7272
7273 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
7274 rc = VBOXSTRICTRC_VAL(rc2);
7275 if (RT_LIKELY(rc == VINF_SUCCESS))
7276 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7277 else
7278 {
7279 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RGv failed with %Rrc\n",
7280 pVmxTransient->uExitQualification, rc));
7281 }
7282 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
7283 return rc;
7284}
7285
7286
7287/**
7288 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
7289 */
7290HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7291{
7292 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7293 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7294 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7295 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7296 AssertRCReturn(rc, rc);
7297
7298 PVM pVM = pVCpu->CTX_SUFF(pVM);
7299 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7300 if (RT_LIKELY(rc == VINF_SUCCESS))
7301 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7302 else
7303 {
7304 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
7305 rc = VERR_EM_INTERPRETER;
7306 }
7307 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
7308 return rc;
7309}
7310
7311
7312/**
7313 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
7314 */
7315HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7316{
7317 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7318 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7319 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7320 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7321 AssertRCReturn(rc, rc);
7322
7323 PVM pVM = pVCpu->CTX_SUFF(pVM);
7324 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7325 rc = VBOXSTRICTRC_VAL(rc2);
7326 if (RT_LIKELY( rc == VINF_SUCCESS
7327 || rc == VINF_EM_HALT))
7328 {
7329 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7330 AssertRCReturn(rc3, rc3);
7331
7332 if ( rc == VINF_EM_HALT
7333 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
7334 {
7335 rc = VINF_SUCCESS;
7336 }
7337 }
7338 else
7339 {
7340 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
7341 rc = VERR_EM_INTERPRETER;
7342 }
7343 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
7344 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
7345 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
7346 return rc;
7347}
7348
7349
7350/**
7351 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
7352 */
7353HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7354{
7355 /*
7356 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
7357 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
7358 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
7359 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
7360 */
7361 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7362 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7363}
7364
7365
7366/**
7367 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
7368 */
7369HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7370{
7371 /*
7372 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
7373 * root operation. If we get here, there is something funny going on.
7374 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
7375 */
7376 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7377 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7378}
7379
7380
7381/**
7382 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
7383 */
7384HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7385{
7386 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
7387 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7388 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7389}
7390
7391
7392/**
7393 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
7394 */
7395HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7396{
7397 /*
7398 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
7399 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
7400 * See Intel spec. 25.3 "Other Causes of VM-exits".
7401 */
7402 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7403 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7404}
7405
7406
7407/**
7408 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
7409 * VM-exit.
7410 */
7411HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7412{
7413 /*
7414 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM. See Intel spec. "33.14.1 Default Treatment of
7415 * SMI Delivery" and "29.3 VMX Instructions" for "VMXON". It is -NOT- blocked in VMX non-root operation so we can potentially
7416 * still get these exits. See Intel spec. "23.8 Restrictions on VMX operation".
7417 */
7418 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7419 return VINF_SUCCESS; /** @todo r=ramshankar: correct? */
7420}
7421
7422
7423/**
7424 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
7425 * VM-exit.
7426 */
7427HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7428{
7429 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7430 return VINF_EM_RESET;
7431}
7432
7433
7434/**
7435 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
7436 */
7437HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7438{
7439 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7440 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
7441 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7442 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7443 AssertRCReturn(rc, rc);
7444
7445 pMixedCtx->rip++;
7446 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7447 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
7448 rc = VINF_SUCCESS;
7449 else
7450 rc = VINF_EM_HALT;
7451
7452 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
7453 return rc;
7454}
7455
7456
7457/**
7458 * VM-exit handler for instructions that result in a #UD exception delivered to the guest.
7459 */
7460HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7461{
7462 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7463 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
7464 return VINF_SUCCESS;
7465}
7466
7467
7468/**
7469 * VM-exit handler for expiry of the VMX preemption timer.
7470 */
7471HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7472{
7473 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7474
7475 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
7476 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7477
7478 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
7479 PVM pVM = pVCpu->CTX_SUFF(pVM);
7480 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
7481 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
7482 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
7483}
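
/*
 * Illustrative sketch, not part of the original code: how a VMX-preemption timer value is typically
 * derived from a TSC delta when the timer is reinitialized on VM-entry. Per the Intel spec. the
 * timer counts down once every 2^N TSC ticks, where N is reported in bits 4:0 of the IA32_VMX_MISC
 * MSR; the helper and its parameters are hypothetical.
 */
#if 0
static uint32_t hmR0VmxSketchPreemptTimerTicks(uint64_t cTscTicks, uint8_t cPreemptTimerShift)
{
    uint64_t const cTicks = cTscTicks >> cPreemptTimerShift;        /* One timer decrement per 2^shift TSC ticks. */
    return cTicks > UINT32_MAX ? UINT32_MAX : (uint32_t)cTicks;     /* The VMCS timer field is only 32 bits wide. */
}
#endif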
7484
7485
7486/**
7487 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
7488 */
7489HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7490{
7491 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7492 /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
7493 /** @todo check if XSETBV is supported by the recompiler. */
7494 return VERR_EM_INTERPRETER;
7495}
7496
7497
7498/**
7499 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
7500 */
7501HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7502{
7503 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7504 /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
7505 /** @todo implement EMInterpretInvpcid() */
7506 return VERR_EM_INTERPRETER;
7507}
7508
7509
7510/**
7511 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
7512 * Error VM-exit.
7513 */
7514HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7515{
7516 uint32_t uIntrState;
7517 HMVMXHCUINTREG uHCReg;
7518 uint64_t u64Val;
7519 uint32_t u32Val;
7520
7521 int rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7522 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
7523 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
7524 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
7525 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7526 AssertRCReturn(rc, rc);
7527
7528 Log(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
7529 Log(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
7530 Log(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
7531 Log(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
7532
7533 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
7534 Log(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
7535 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
7536 Log(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
7537 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
7538 Log(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
7539 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
7540 Log(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
7541 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
7542 Log(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
7543 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
7544 Log(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
7545
7546 PVM pVM = pVCpu->CTX_SUFF(pVM);
7547 HMDumpRegs(pVM, pVCpu, pMixedCtx);
7548
7549 return VERR_VMX_INVALID_GUEST_STATE;
7550}
7551
7552
7553/**
7554 * VM-exit handler for VM-entry failure due to an MSR-load
7555 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
7556 */
7557HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7558{
7559 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7560 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7561}
7562
7563
7564/**
7565 * VM-exit handler for VM-entry failure due to a machine-check event
7566 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
7567 */
7568HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7569{
7570 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7571 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7572}
7573
7574
7575/**
7576 * VM-exit handler for all undefined reasons. Should never ever happen... in
7577 * theory.
7578 */
7579HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7580{
7581 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
7582 return VERR_VMX_UNDEFINED_EXIT_CODE;
7583}
7584
7585
7586/**
7587 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
7588 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
7589 * Conditional VM-exit.
7590 */
7591HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7592{
7593 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7594 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
7595 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
7596 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
7597 return VERR_EM_INTERPRETER;
7598 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7599 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7600}
7601
7602
7603/**
7604 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
7605 */
7606HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7607{
7608 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7609 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
7610 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
7611 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
7612 return VERR_EM_INTERPRETER;
7613 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7614 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7615}
7616
7617
7618/**
7619 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
7620 */
7621HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7622{
7623 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7624 /* EMInterpretRdmsr() requires CR0, EFLAGS and SS segment register. */
7625 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7626 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7627 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7628 AssertRCReturn(rc, rc);
7629
7630 PVM pVM = pVCpu->CTX_SUFF(pVM);
7631 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7632 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
7633 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
7634 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
7635
7636 if (RT_LIKELY(rc == VINF_SUCCESS))
7637 {
7638 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7639 Assert(pVmxTransient->cbInstr == 2);
7640 }
7641 return rc;
7642}
7643
7644
7645/**
7646 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
7647 */
7648HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7649{
7650 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7651 PVM pVM = pVCpu->CTX_SUFF(pVM);
7652 int rc = VINF_SUCCESS;
7653
7654 /* If TPR patching is active, LSTAR holds the guest TPR; writes to it must be propagated to the APIC. */
7655 if ( pVM->hm.s.fTPRPatchingActive
7656 && pMixedCtx->ecx == MSR_K8_LSTAR)
7657 {
7658 Assert(!CPUMIsGuestInLongModeEx(pMixedCtx)); /* Requires EFER but it's always up-to-date. */
7659 if ((pMixedCtx->eax & 0xff) != pVmxTransient->u8GuestTpr)
7660 {
7661 rc = PDMApicSetTPR(pVCpu, pMixedCtx->eax & 0xff);
7662 AssertRC(rc);
7663 }
7664
7665 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7666 Assert(pVmxTransient->cbInstr == 2);
7667 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7668 return VINF_SUCCESS;
7669 }
7670
7671 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
7672 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7673 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7674 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7675 AssertRCReturn(rc, rc);
7676 Log(("ecx=%#RX32\n", pMixedCtx->ecx));
7677
7678 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
7679 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
7680 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
7681
7682 if (RT_LIKELY(rc == VINF_SUCCESS))
7683 {
7684 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7685
7686 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
7687 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
7688 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
7689 {
7690 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_APIC_STATE);
7691 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7692 }
7693 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
7694 {
7695 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
7696 AssertRCReturn(rc, rc);
7697 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
7698 }
7699 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
7700 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
7701
7702 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
7703 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)))
7704 {
7705 switch (pMixedCtx->ecx)
7706 {
7707 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
7708 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
7709 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
7710 case MSR_K8_FS_BASE: /* no break */
7711 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
7712 /* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
7713 }
7714 }
7715#ifdef VBOX_STRICT
7716 else
7717 {
7718 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
7719 switch (pMixedCtx->ecx)
7720 {
7721 case MSR_IA32_SYSENTER_CS:
7722 case MSR_IA32_SYSENTER_EIP:
7723 case MSR_IA32_SYSENTER_ESP:
7724 case MSR_K8_FS_BASE:
7725 case MSR_K8_GS_BASE:
7726 {
7727 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
7728 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7729 }
7730
7731 case MSR_K8_LSTAR:
7732 case MSR_K6_STAR:
7733 case MSR_K8_SF_MASK:
7734 case MSR_K8_TSC_AUX:
7735 case MSR_K8_KERNEL_GS_BASE:
7736 {
7737 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
7738 pMixedCtx->ecx));
7739 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7740 }
7741 }
7742 }
7743#endif /* VBOX_STRICT */
7744 }
7745 return rc;
7746}
7747
7748
7749/**
7750 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
7751 */
7752HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7753{
7754 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7755 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT. */
7756 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
7757 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
7758 return VERR_EM_INTERPRETER;
7759 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
7760 return VERR_VMX_UNEXPECTED_EXIT_CODE;
7761}
7762
7763
7764/**
7765 * VM-exit handler for when the TPR value is lowered below the specified
7766 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
7767 */
7768HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7769{
7770 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7771 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW);
7772
7773 /*
7774 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
7775 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
7776 * resume guest execution.
7777 */
7778 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7779 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
7780 return VINF_SUCCESS;
7781}
7782
7783
7784/**
7785 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
7786 * VM-exit.
7787 *
7788 * @retval VINF_SUCCESS when guest execution can continue.
7789 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
7790 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
7791 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
7792 * recompiler.
7793 */
7794HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7795{
7796 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7797 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
7798 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7799 AssertRCReturn(rc, rc);
7800
7801 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
7802 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
7803 PVM pVM = pVCpu->CTX_SUFF(pVM);
7804 switch (uAccessType)
7805 {
7806 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
7807 {
7808#if 0
7809 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
7810 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7811#else
7812 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7813 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7814 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7815#endif
7816 AssertRCReturn(rc, rc);
7817
7818 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7819 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
7820 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
7821 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
7822
7823 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
7824 {
7825 case 0: /* CR0 */
7826 Log(("CRX CR0 write rc=%d CR0=%#RGv\n", rc, pMixedCtx->cr0));
7827 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7828 break;
7829 case 2: /* CR2 */
7830 /* Nothing to do here; CR2 is not part of the VMCS. */
7831 break;
7832 case 3: /* CR3 */
7833 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
7834 Log(("CRX CR3 write rc=%d CR3=%#RGv\n", rc, pMixedCtx->cr3));
7835 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
7836 break;
7837 case 4: /* CR4 */
7838 Log(("CRX CR4 write rc=%d CR4=%#RGv\n", rc, pMixedCtx->cr4));
7839 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
7840 break;
7841 case 8: /* CR8 */
7842 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
7843 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
7844 /* We don't need to update HM_CHANGED_VMX_GUEST_APIC_STATE here as this -cannot- happen with TPR shadowing. */
7845 break;
7846 default:
7847 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
7848 break;
7849 }
7850
7851 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7852 break;
7853 }
7854
7855 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
7856 {
7857 /* EMInterpretCRxRead() requires EFER MSR, CS. */
7858 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7859 AssertRCReturn(rc, rc);
7860 Assert( !pVM->hm.s.fNestedPaging
7861 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
7862 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
7863
7864 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
7865 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
7866 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
7867
7868 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
7869 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
7870 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
7871 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
7872 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
7873 Log(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
7874 break;
7875 }
7876
7877 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
7878 {
7879 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7880 AssertRCReturn(rc, rc);
7881 rc = EMInterpretCLTS(pVM, pVCpu);
7882 AssertRCReturn(rc, rc);
7883 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7884 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
7885 Log(("CRX CLTS write rc=%d\n", rc));
7886 break;
7887 }
7888
7889 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
7890 {
7891 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7892 AssertRCReturn(rc, rc);
7893 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
7894 if (RT_LIKELY(rc == VINF_SUCCESS))
7895 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
7896 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
7897 Log(("CRX LMSW write rc=%d\n", rc));
7898 break;
7899 }
7900
7901 default:
7902 {
7903 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
7904 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
7905 }
7906 }
7907
7908 /* Validate possible error codes. */
7909 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
7910 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
7911 if (RT_SUCCESS(rc))
7912 {
7913 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
7914 AssertRCReturn(rc2, rc2);
7915 }
7916
7917 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
7918 return rc;
7919}
7920
7921
7922/**
7923 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
7924 * VM-exit.
7925 */
7926HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7927{
7928 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
7929 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
7930
7931 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
7932 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7933 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7934 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
7935 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
7936 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
7937 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
7938 AssertRCReturn(rc, rc);
7939 Log(("CS:RIP=%04x:%#RGv\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
7940
7941 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
7942 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
7943 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
7944 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
7945 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
7946 bool fIOString = (VMX_EXIT_QUALIFICATION_IO_STRING(pVmxTransient->uExitQualification) == 1);
7947 Assert(uIOWidth == 0 || uIOWidth == 1 || uIOWidth == 3);
7948
7949 /* I/O operation lookup arrays. */
7950 static const uint32_t s_aIOSize[4] = { 1, 2, 0, 4 }; /* Size of the I/O Accesses. */
7951 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
7952
7953 const uint32_t cbSize = s_aIOSize[uIOWidth];
7954 const uint32_t cbInstr = pVmxTransient->cbInstr;
7955 PVM pVM = pVCpu->CTX_SUFF(pVM);
7956 if (fIOString)
7957 {
7958 /* INS/OUTS - I/O String instruction. */
7959 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
7960 /** @todo For now we manually disassemble; later, optimize this by getting the fields
7961 * from the VMCS. */
7962 /** @todo VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
7963 * operand of the instruction. VMX_VMCS32_RO_EXIT_INSTR_INFO contains
7964 * segment prefix info. */
7965 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
7966 if (RT_SUCCESS(rc))
7967 {
7968 if (fIOWrite)
7969 {
7970 VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
7971 (DISCPUMODE)pDis->uAddrMode, cbSize);
7972 rc = VBOXSTRICTRC_VAL(rc2);
7973 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
7974 }
7975 else
7976 {
7977 VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
7978 (DISCPUMODE)pDis->uAddrMode, cbSize);
7979 rc = VBOXSTRICTRC_VAL(rc2);
7980 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
7981 }
7982 }
7983 else
7984 {
7985 AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
7986 rc = VINF_EM_RAW_EMULATE_INSTR;
7987 }
7988 }
7989 else
7990 {
7991 /* IN/OUT - I/O instruction. */
7992 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
7993 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(pVmxTransient->uExitQualification));
7994 if (fIOWrite)
7995 {
7996 VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbSize);
7997 rc = VBOXSTRICTRC_VAL(rc2);
7998 if (rc == VINF_IOM_R3_IOPORT_WRITE)
7999 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8000 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
8001 }
8002 else
8003 {
8004 uint32_t u32Result = 0;
8005 VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbSize);
8006 rc = VBOXSTRICTRC_VAL(rc2);
8007 if (IOM_SUCCESS(rc))
8008 {
8009 /* Save result of I/O IN instr. in AL/AX/EAX. */
8010 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
8011 }
8012 else if (rc == VINF_IOM_R3_IOPORT_READ)
8013 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbSize);
8014 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
8015 }
8016 }
8017
8018 if (IOM_SUCCESS(rc))
8019 {
8020 pMixedCtx->rip += cbInstr;
8021 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8022 if (RT_LIKELY(rc == VINF_SUCCESS))
8023 {
8024 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx); /* For DR7. */
8025 AssertRCReturn(rc, rc);
8026
8027 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
8028 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK)
8029 {
8030 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
8031 for (unsigned i = 0; i < 4; i++)
8032 {
8033 uint32_t uBPLen = s_aIOSize[X86_DR7_GET_LEN(pMixedCtx->dr[7], i)];
8034 if ( ( uIOPort >= pMixedCtx->dr[i]
8035 && uIOPort < pMixedCtx->dr[i] + uBPLen)
8036 && (pMixedCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
8037 && (pMixedCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
8038 {
8039 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8040 uint64_t uDR6 = ASMGetDR6();
8041
8042 /* Clear all breakpoint status flags and set the one we just hit. */
8043 uDR6 &= ~(X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3);
8044 uDR6 |= (uint64_t)RT_BIT(i);
8045
8046 /*
8047 * Note: AMD64 Architecture Programmer's Manual 13.1:
8048 * Bits 15:13 of the DR6 register are never cleared by the processor and must
8049 * be cleared by software after the contents have been read.
8050 */
8051 ASMSetDR6(uDR6);
8052
8053 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8054 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8055
8056 /* Paranoia. */
8057 pMixedCtx->dr[7] &= 0xffffffff; /* Upper 32 bits reserved. */
8058 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* MBZ. */
8059 pMixedCtx->dr[7] |= 0x400; /* MB1. */
8060
8061 /* Resync DR7 */
8062 /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
8063 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8064
8065 /* Set #DB to be injected into the VM and continue guest execution. */
8066 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
8067 break;
8068 }
8069 }
8070 }
8071 }
8072 }
8073
8074#ifdef DEBUG
8075 if (rc == VINF_IOM_R3_IOPORT_READ)
8076 Assert(!fIOWrite);
8077 else if (rc == VINF_IOM_R3_IOPORT_WRITE)
8078 Assert(fIOWrite);
8079 else
8080 {
8081 AssertMsg( RT_FAILURE(rc)
8082 || rc == VINF_SUCCESS
8083 || rc == VINF_EM_RAW_EMULATE_INSTR
8084 || rc == VINF_EM_RAW_GUEST_TRAP
8085 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
8086 }
8087#endif
8088
8089 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
8090 return rc;
8091}
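
/*
 * Illustrative sketch, not part of the original code: the I/O breakpoint test performed in the DR7
 * loop of the I/O handler above, pulled out into a hypothetical standalone predicate. It only uses
 * the X86_DR7_* macros already used by the handler; paIOSize is the same length-lookup table as
 * s_aIOSize above.
 */
#if 0
static bool hmR0VmxSketchIsIoBreakpointHit(PCPUMCTX pCtx, unsigned iBp, uint32_t uIOPort, const uint32_t *paIOSize)
{
    uint32_t const uBpLen = paIOSize[X86_DR7_GET_LEN(pCtx->dr[7], iBp)];        /* Breakpoint length: 1, 2 or 4 bytes. */
    return uIOPort >= pCtx->dr[iBp]
        && uIOPort <  pCtx->dr[iBp] + uBpLen                                    /* The port falls inside the breakpoint range, */
        && (pCtx->dr[7] & (X86_DR7_L(iBp) | X86_DR7_G(iBp)))                    /* the breakpoint is enabled (local or global), */
        && (pCtx->dr[7] & X86_DR7_RW(iBp, X86_DR7_RW_IO))
            == X86_DR7_RW(iBp, X86_DR7_RW_IO);                                  /* and it is an I/O breakpoint. */
}
#endif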
8092
8093
8094/**
8095 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
8096 * VM-exit.
8097 */
8098HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8099{
8100 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8101
8102 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
8103 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8104 AssertRCReturn(rc, rc);
8105 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
8106 {
8107 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
8108 AssertRCReturn(rc, rc);
8109 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
8110 {
8111 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
8112 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
8113 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
8114 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
8115 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
8116 {
8117 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
8118 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
8119
8120 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
8121 Assert(!pVCpu->hm.s.Event.fPending);
8122 pVCpu->hm.s.Event.fPending = true;
8123 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
8124 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
8125 AssertRCReturn(rc, rc);
8126 if (fErrorCodeValid)
8127 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
8128 else
8129 pVCpu->hm.s.Event.u32ErrCode = 0;
8130 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
8131 && uVector == X86_XCPT_PF)
8132 {
8133 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
8134 }
8135 Log(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
8136 }
8137 }
8138 }
8139 /** @todo Emulate task switch someday, currently just going back to ring-3 for
8140 * emulation. */
8141 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
8142 return VERR_EM_INTERPRETER;
8143}
8144
8145
8146/**
8147 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
8148 */
8149HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8150{
8151 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8152 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG);
8153 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
8154 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
8155 AssertRCReturn(rc, rc);
8156 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
8157 return VINF_EM_DBG_STOP;
8158}
8159
8160
8161/**
8162 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
8163 */
8164HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8165{
8166 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8167
8168 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8169 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8170 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8171 return VINF_SUCCESS;
8172 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8173 return rc;
8174
8175#if 0
8176 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
8177 * just sync the whole thing. */
8178 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8179#else
8180 /* Aggressive state sync. for now. */
8181 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8182 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8183 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8184#endif
8185 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8186 AssertRCReturn(rc, rc);
8187
8188 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
8189 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
8190 switch (uAccessType)
8191 {
8192 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
8193 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
8194 {
8195 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
8196 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
8197 {
8198 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
8199 }
8200
8201 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
8202 GCPhys &= PAGE_BASE_GC_MASK;
8203 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
8204 PVM pVM = pVCpu->CTX_SUFF(pVM);
8205 Log(("ApicAccess uAccessType=%#x GCPhys=%RGp Off=%#x\n", uAccessType, GCPhys,
8206 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
8207
8208 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
8209 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
8210 CPUMCTX2CORE(pMixedCtx), GCPhys);
8211 rc = VBOXSTRICTRC_VAL(rc2);
8212 Log(("ApicAccess rc=%d\n", rc));
8213 if ( rc == VINF_SUCCESS
8214 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8215 || rc == VERR_PAGE_NOT_PRESENT)
8216 {
8217 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8218 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8219 rc = VINF_SUCCESS;
8220 }
8221 break;
8222 }
8223
8224 default:
8225 Log(("ApicAccess uAccessType=%#x\n", uAccessType));
8226 rc = VINF_EM_RAW_EMULATE_INSTR;
8227 break;
8228 }
8229
8230 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
8231 return rc;
8232}
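
/*
 * Illustrative sketch, not part of the original code: how the APIC-access handler above rebuilds
 * the faulting guest-physical address - page-align the guest's APIC base MSR and add the offset
 * reported in the exit qualification. The helper name is hypothetical; the macros are the ones
 * used above.
 */
#if 0
static RTGCPHYS hmR0VmxSketchApicAccessGCPhys(uint64_t u64MsrApicBase, RTGCUINTPTR uExitQualification)
{
    RTGCPHYS GCPhys = u64MsrApicBase & PAGE_BASE_GC_MASK;                        /* Page containing the APIC-access page. */
    GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(uExitQualification);     /* Byte offset of the access within it. */
    return GCPhys;
}
#endif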
8233
8234
8235/**
8236 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
8237 * VM-exit.
8238 */
8239HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8240{
8241 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8242
8243 /* We should -not- get this VM-exit if the guest is debugging. */
8244 if (CPUMIsGuestDebugStateActive(pVCpu))
8245 {
8246 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8247 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8248 }
8249
8250 int rc = VERR_INTERNAL_ERROR_5;
8251 if ( !DBGFIsStepping(pVCpu)
8252 && !CPUMIsHyperDebugStateActive(pVCpu))
8253 {
8254 /* Don't intercept MOV DRx. */
8255 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
8256 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
8257 AssertRCReturn(rc, rc);
8258
8259 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
8260 PVM pVM = pVCpu->CTX_SUFF(pVM);
8261 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
8262 AssertRC(rc);
8263 Assert(CPUMIsGuestDebugStateActive(pVCpu));
8264
8265#ifdef VBOX_WITH_STATISTICS
8266 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8267 AssertRCReturn(rc, rc);
8268 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8269 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8270 else
8271 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8272#endif
8273 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
8274 return VINF_SUCCESS;
8275 }
8276
8277 /*
8278 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
8279 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
8280 */
8281 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8282 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8283 AssertRCReturn(rc, rc);
8284
8285 PVM pVM = pVCpu->CTX_SUFF(pVM);
8286 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
8287 {
8288 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8289 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
8290 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
8291 if (RT_SUCCESS(rc))
8292 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
8293 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
8294 }
8295 else
8296 {
8297 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
8298 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
8299 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
8300 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
8301 }
8302
8303 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
8304 if (RT_SUCCESS(rc))
8305 {
8306 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8307 AssertRCReturn(rc2, rc2);
8308 }
8309 return rc;
8310}
8311
8312
8313/**
8314 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
8315 * Conditional VM-exit.
8316 */
8317HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8318{
8319 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8320 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8321
8322 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8323 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8324 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8325 return VINF_SUCCESS;
8326 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8327 return rc;
8328
8329 RTGCPHYS GCPhys = 0;
8330 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8331
8332#if 0
8333 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8334#else
8335 /* Aggressive state sync. for now. */
8336 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8337 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8338 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8339#endif
8340 AssertRCReturn(rc, rc);
8341
8342 /*
8343 * If we succeed, resume guest execution.
8344 * If we fail to interpret the instruction because we couldn't get the guest-physical address of the
8345 * page containing the instruction via the guest's page tables (we would invalidate the guest page in
8346 * the host TLB), we resume execution anyway; the resulting guest page fault lets the guest handle this
8347 * weird case. See @bugref{6043}.
8348 */
8349 PVM pVM = pVCpu->CTX_SUFF(pVM);
8350 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
8351 rc = VBOXSTRICTRC_VAL(rc2);
8352 Log(("EPT misconfig at %#RGv RIP=%#RGv rc=%d\n", GCPhys, pMixedCtx->rip, rc));
8353 if ( rc == VINF_SUCCESS
8354 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8355 || rc == VERR_PAGE_NOT_PRESENT)
8356 {
8357 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8358 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8359 return VINF_SUCCESS;
8360 }
8361 return rc;
8362}
8363
8364
8365/**
8366 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
8367 * VM-exit.
8368 */
8369HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8370{
8371 VMX_VALIDATE_EXIT_HANDLER_PARAMS();
8372 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
8373
8374 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8375 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8376 if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
8377 return VINF_SUCCESS;
8378 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8379 return rc;
8380
8381 RTGCPHYS GCPhys = 0;
8382 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
8383 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8384#if 0
8385 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
8386#else
8387 /* Aggressive state sync. for now. */
8388 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8389 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8390 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8391#endif
8392 AssertRCReturn(rc, rc);
8393
8394 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
8395 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RGv", pVmxTransient->uExitQualification));
8396
8397 RTGCUINT uErrorCode = 0;
8398 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
8399 uErrorCode |= X86_TRAP_PF_ID;
8400 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
8401 uErrorCode |= X86_TRAP_PF_RW;
8402 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
8403 uErrorCode |= X86_TRAP_PF_P;
8404
8405 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
8406
8407 Log(("EPT violation %#x at %#RGv ErrorCode %#x CS:EIP=%04x:%#RX64\n", (uint32_t)pVmxTransient->uExitQualification, GCPhys,
8408 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8409
8410 /* Handle the pagefault trap for the nested shadow table. */
8411 PVM pVM = pVCpu->CTX_SUFF(pVM);
8412 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
8413 TRPMResetTrap(pVCpu);
8414
8415 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
8416 if ( rc == VINF_SUCCESS
8417 || rc == VERR_PAGE_TABLE_NOT_PRESENT
8418 || rc == VERR_PAGE_NOT_PRESENT)
8419 {
8420 /* Successfully synced our shadow page tables or emulated an MMIO instruction. */
8421 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
8422 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8423 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8424 return VINF_SUCCESS;
8425 }
8426
8427 Log(("EPT return to ring-3 rc=%d\n", rc));
8428 return rc;
8429}
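
/*
 * Illustrative sketch, not part of the original code: the exit-qualification to #PF error-code
 * translation done by the EPT-violation handler above, as a hypothetical standalone helper. The
 * bit names are the VMX_EXIT_QUALIFICATION_EPT_* and X86_TRAP_PF_* macros already used there.
 */
#if 0
static RTGCUINT hmR0VmxSketchEptViolationToPfErrCode(RTGCUINTPTR uExitQualification)
{
    RTGCUINT uErrCode = 0;
    if (uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
        uErrCode |= X86_TRAP_PF_ID;     /* Caused by an instruction fetch. */
    if (uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
        uErrCode |= X86_TRAP_PF_RW;     /* Caused by a data write. */
    if (uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
        uErrCode |= X86_TRAP_PF_P;      /* The EPT entry was present, so it's a permission problem. */
    return uErrCode;
}
#endif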
8430
8431
8432/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8433/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
8434/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8435/**
8436 * VM-exit exception handler for #MF (Math Fault: floating point exception).
8437 */
8438static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8439{
8440 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8441 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
8442
8443 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8444 AssertRCReturn(rc, rc);
8445
8446 if (!(pMixedCtx->cr0 & X86_CR0_NE))
8447 {
8448 /* Old-style FPU error reporting needs some extra work. */
8449 /** @todo don't fall back to the recompiler, but do it manually. */
8450 return VERR_EM_INTERPRETER;
8451 }
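    /* CR0.NE is set here, so reflect the #MF back into the guest via the normal event injection path. */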
8452 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8453 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8454 return rc;
8455}
8456
8457
8458/**
8459 * VM-exit exception handler for #BP (Breakpoint exception).
8460 */
8461static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8462{
8463 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8464 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
8465
8466    /** @todo Try to optimize this by not saving the entire guest state unless
8467     *        really needed. */
8468 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8469 AssertRCReturn(rc, rc);
8470
8471 PVM pVM = pVCpu->CTX_SUFF(pVM);
8472 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
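    /* Give DBGF the first look at the breakpoint; VINF_EM_RAW_GUEST_TRAP means it is not one of our
       debugger breakpoints, so the #BP is re-injected into the guest below. */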
8473 if (rc == VINF_EM_RAW_GUEST_TRAP)
8474 {
8475 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8476 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8477 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8478 AssertRCReturn(rc, rc);
8479
8480 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8481 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8482 }
8483
8484 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
8485 return rc;
8486}
8487
8488
8489/**
8490 * VM-exit exception handler for #DB (Debug exception).
8491 */
8492static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8493{
8494 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8495 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
8496
8497 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8498 AssertRCReturn(rc, rc);
8499
8500 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
8501 uint64_t uDR6 = X86_DR6_INIT_VAL;
8502 uDR6 |= (pVmxTransient->uExitQualification
8503 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
8504 PVM pVM = pVCpu->CTX_SUFF(pVM);
8505 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
8506 if (rc == VINF_EM_RAW_GUEST_TRAP)
8507 {
8508 /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
8509 pMixedCtx->dr[6] = uDR6;
8510
8511 if (CPUMIsGuestDebugStateActive(pVCpu))
8512 ASMSetDR6(pMixedCtx->dr[6]);
8513
8514 rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
8515
8516 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
8517 pMixedCtx->dr[7] &= ~X86_DR7_GD;
8518
8519 /* Paranoia. */
8520 pMixedCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
8521 pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
8522 pMixedCtx->dr[7] |= 0x400; /* must be one */
8523
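        /* Write the sanitized DR7 value straight into the VMCS; guest DR7 is loaded from there on VM-entry. */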
8524 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
8525        AssertRCReturn(rc, rc);
8526
8527 int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8528 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8529 rc2 |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8530 AssertRCReturn(rc2, rc2);
8531 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8532 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8533 rc = VINF_SUCCESS;
8534 }
8535
8536 return rc;
8537}
8538
8539
8540/**
8541 * VM-exit exception handler for #NM (Device-not-available exception: floating
8542 * point exception).
8543 */
8544static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8545{
8546 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8547
8548#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8549 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8550#endif
8551
8552 /* We require CR0 and EFER. EFER is always up-to-date. */
8553 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8554 AssertRCReturn(rc, rc);
8555
8556 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
8557 PVM pVM = pVCpu->CTX_SUFF(pVM);
8558 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8559 if (rc == VINF_SUCCESS)
8560 {
8561 Assert(CPUMIsGuestFPUStateActive(pVCpu));
8562 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
8563 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
8564 return VINF_SUCCESS;
8565 }
8566
8567 /* Forward #NM to the guest. */
8568 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
8569 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8570 AssertRCReturn(rc, rc);
8571 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8572 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
8573 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
8574 return rc;
8575}
8576
8577
8578/**
8579 * VM-exit exception handler for #GP (General-protection exception).
8580 *
8581 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
8582 */
8583static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8584{
8585 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8586 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
8587
8588 int rc = VERR_INTERNAL_ERROR_5;
8589 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8590 {
8591#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8592 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
8593 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8594 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8595 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8596 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8597 AssertRCReturn(rc, rc);
8598 Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RGv CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
8599 pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
8600 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8601 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8602 return rc;
8603#else
8604 /* We don't intercept #GP. */
8605 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
8606 return VERR_VMX_UNEXPECTED_EXCEPTION;
8607#endif
8608 }
8609
8610 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
8611 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
8612
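    /* Real-on-v86 guest without unrestricted execution: privileged and IOPL-sensitive instructions
       (CLI, STI, HLT, PUSHF, POPF, IRET, INT, ...) raise #GP here and are emulated below. */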
8613 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
8614 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8615 AssertRCReturn(rc, rc);
8616
8617 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
8618 uint32_t cbOp = 0;
8619 PVM pVM = pVCpu->CTX_SUFF(pVM);
8620 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
8621 if (RT_SUCCESS(rc))
8622 {
8623 rc = VINF_SUCCESS;
8624 Assert(cbOp == pDis->cbInstr);
8625 Log(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
8626 switch (pDis->pCurInstr->uOpcode)
8627 {
8628 case OP_CLI:
8629 pMixedCtx->eflags.Bits.u1IF = 0;
8630 pMixedCtx->rip += pDis->cbInstr;
8631 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8632 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
8633 break;
8634
8635 case OP_STI:
8636 pMixedCtx->eflags.Bits.u1IF = 1;
8637 pMixedCtx->rip += pDis->cbInstr;
8638 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
8639 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
8640 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
8641 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
8642 break;
8643
8644 case OP_HLT:
8645 rc = VINF_EM_HALT;
8646 pMixedCtx->rip += pDis->cbInstr;
8647 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8648 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8649 break;
8650
8651 case OP_POPF:
8652 {
8653 Log(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
8654 uint32_t cbParm = 0;
8655 uint32_t uMask = 0;
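                /* An operand-size prefix selects a 32-bit POPF; the real-mode default is 16 bits. */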
8656 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8657 {
8658 cbParm = 4;
8659 uMask = 0xffffffff;
8660 }
8661 else
8662 {
8663 cbParm = 2;
8664 uMask = 0xffff;
8665 }
8666
8667 /* Get the stack pointer & pop the contents of the stack onto EFlags. */
8668 RTGCPTR GCPtrStack = 0;
8669 X86EFLAGS uEflags;
8670 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8671 &GCPtrStack);
8672 if (RT_SUCCESS(rc))
8673 {
8674 Assert(sizeof(uEflags.u32) >= cbParm);
8675 uEflags.u32 = 0;
8676 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u32, cbParm);
8677 }
8678 if (RT_FAILURE(rc))
8679 {
8680 rc = VERR_EM_INTERPRETER;
8681 break;
8682 }
8683 Log(("POPF %x -> %#RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
8684 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8685 | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
8686 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
8687 pMixedCtx->eflags.Bits.u1RF = 0;
8688 pMixedCtx->esp += cbParm;
8689 pMixedCtx->esp &= uMask;
8690 pMixedCtx->rip += pDis->cbInstr;
8691 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
8692 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
8693 break;
8694 }
8695
8696 case OP_PUSHF:
8697 {
8698 uint32_t cbParm = 0;
8699 uint32_t uMask = 0;
8700 if (pDis->fPrefix & DISPREFIX_OPSIZE)
8701 {
8702 cbParm = 4;
8703 uMask = 0xffffffff;
8704 }
8705 else
8706 {
8707 cbParm = 2;
8708 uMask = 0xffff;
8709 }
8710
8711 /* Get the stack pointer & push the contents of eflags onto the stack. */
8712 RTGCPTR GCPtrStack = 0;
8713 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
8714 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
8715 if (RT_FAILURE(rc))
8716 {
8717 rc = VERR_EM_INTERPRETER;
8718 break;
8719 }
8720 X86EFLAGS uEflags;
8721 uEflags = pMixedCtx->eflags;
8722                /* The RF & VM bits are cleared in the EFLAGS image stored on the stack; see Intel Instruction reference for PUSHF. */
8723 uEflags.Bits.u1RF = 0;
8724 uEflags.Bits.u1VM = 0;
8725
8726 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &uEflags.u, cbParm);
8727 if (RT_FAILURE(rc))
8728 {
8729 rc = VERR_EM_INTERPRETER;
8730 break;
8731 }
8732 Log(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
8733 pMixedCtx->esp -= cbParm;
8734 pMixedCtx->esp &= uMask;
8735 pMixedCtx->rip += pDis->cbInstr;
8736 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
8737 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
8738 break;
8739 }
8740
8741 case OP_IRET:
8742 {
8743 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
8744 * instruction reference. */
8745 RTGCPTR GCPtrStack = 0;
8746 uint32_t uMask = 0xffff;
8747 uint16_t aIretFrame[3];
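                /* Real-mode IRET pops a 3-word frame: IP, CS and FLAGS. Prefixed forms are left to the interpreter. */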
8748 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
8749 {
8750 rc = VERR_EM_INTERPRETER;
8751 break;
8752 }
8753 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
8754 &GCPtrStack);
8755 if (RT_SUCCESS(rc))
8756 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
8757 if (RT_FAILURE(rc))
8758 {
8759 rc = VERR_EM_INTERPRETER;
8760 break;
8761 }
8762 pMixedCtx->eip = 0;
8763 pMixedCtx->ip = aIretFrame[0];
8764 pMixedCtx->cs.Sel = aIretFrame[1];
8765 pMixedCtx->cs.ValidSel = aIretFrame[1];
8766 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
8767 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
8768 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
8769 pMixedCtx->sp += sizeof(aIretFrame);
8770 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
8771 | HM_CHANGED_GUEST_RFLAGS;
8772                Log(("IRET %#RGv to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
8773 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
8774 break;
8775 }
8776
8777 case OP_INT:
8778 {
8779 uint16_t uVector = pDis->Param1.uValue & 0xff;
8780 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
8781 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8782 break;
8783 }
8784
8785 case OP_INTO:
8786 {
8787 if (pMixedCtx->eflags.Bits.u1OF)
8788 {
8789 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
8790 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
8791 }
8792 break;
8793 }
8794
8795 default:
8796 {
8797 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
8798 EMCODETYPE_SUPERVISOR);
8799 rc = VBOXSTRICTRC_VAL(rc2);
8800 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
8801 Log(("#GP rc=%Rrc\n", rc));
8802 break;
8803 }
8804 }
8805 }
8806 else
8807 rc = VERR_EM_INTERPRETER;
8808
8809 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
8810 ("#GP Unexpected rc=%Rrc\n", rc));
8811 return rc;
8812}
8813
8814
8815/**
8816 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
8817 * the exception reported in the VMX transient structure back into the VM.
8818 *
8819 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
8820 * up-to-date.
8821 */
8822static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8823{
8824 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8825
8826 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
8827 hmR0VmxCheckExitDueToEventDelivery(). */
8828 int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8829 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8830 AssertRCReturn(rc, rc);
8831 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
8832 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8833 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
8834 return VINF_SUCCESS;
8835}
8836
8837
8838/**
8839 * VM-exit exception handler for #PF (Page-fault exception).
8840 */
8841static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8842{
8843 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
8844 PVM pVM = pVCpu->CTX_SUFF(pVM);
8845 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8846 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8847 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8848 AssertRCReturn(rc, rc);
8849
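    /* With nested paging the guest normally services its own #PFs; this block is only compiled in
       when one of the HMVMX_ALWAYS_TRAP_* debug options forces the exception to be intercepted. */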
8850#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
8851 if (pVM->hm.s.fNestedPaging)
8852 {
8853 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
8854 {
8855 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
8856 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
8857 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8858 0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
8860 }
8861 else
8862 {
8863 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8864 pVCpu->hm.s.Event.fPending = false; /* A vectoring #PF. */
8865 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
8866 Log(("Pending #DF due to vectoring #PF. NP\n"));
8867 }
8868 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8869 return rc;
8870 }
8871#else
8872 Assert(!pVM->hm.s.fNestedPaging);
8873#endif
8874
8875#ifdef VBOX_HM_WITH_GUEST_PATCHING
8876 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8877 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8878 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8879 AssertRCReturn(rc, rc);
8880 /* Shortcut for APIC TPR access, only for 32-bit guests. */
8881 if ( pVM->hm.s.fTRPPatchingAllowed
8882 && pVM->hm.s.pGuestPatchMem
8883 && (pVmxTransient->uExitQualification & 0xfff) == 0x80 /* TPR offset */
8884 && !(pVmxTransient->uExitIntrErrorCode & X86_TRAP_PF_P) /* Page not present */
8885 && CPUMGetGuestCPL(pVCpu) == 0 /* Requires CR0, EFLAGS, segments. */
8886 && !CPUMIsGuestInLongModeEx(pMixedCtx) /* Requires EFER. */
8887 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
8888 {
8889 RTGCPHYS GCPhys;
8890 RTGCPHYS GCPhysApicBase = (pMixedCtx->msrApicBase & PAGE_BASE_GC_MASK);
8891 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pVmxTransient->uExitQualification, NULL /* pfFlags */, &GCPhys);
8892 if ( rc == VINF_SUCCESS
8893 && GCPhys == GCPhysApicBase)
8894 {
8895 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8896 AssertRCReturn(rc, rc);
8897
8898 /* Only attempt to patch the instruction once. */
8899 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pMixedCtx->eip);
8900 if (!pPatch)
8901 return VINF_EM_HM_PATCH_TPR_INSTR;
8902 }
8903 }
8904#endif
8905
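    /* Sync the full guest state; PGMTrap0eHandler may end up interpreting the faulting instruction. */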
8906 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8907 AssertRCReturn(rc, rc);
8908
8909 Log(("#PF: cr2=%#RGv cs:rip=%#04x:%#RGv uErrCode %#RX32 cr3=%#RGv\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
8910 pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
8911
8912 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
8913 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
8914 (RTGCPTR)pVmxTransient->uExitQualification);
8915
8916 Log(("#PF: rc=%Rrc\n", rc));
8917 if (rc == VINF_SUCCESS)
8918 {
8919        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
8920 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
8921 * memory? We don't update the whole state here... */
8922 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
8923 | HM_CHANGED_VMX_GUEST_APIC_STATE;
8924 TRPMResetTrap(pVCpu);
8925 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
8926 return rc;
8927 }
8928 else if (rc == VINF_EM_RAW_GUEST_TRAP)
8929 {
8930 if (!pVmxTransient->fVectoringPF)
8931 {
8932 /* It's a guest page fault and needs to be reflected to the guest. */
8933 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
8934 TRPMResetTrap(pVCpu);
8935 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
8936 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
8937 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
8938 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
8939 }
8940 else
8941 {
8942 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
8943 TRPMResetTrap(pVCpu);
8944            pVCpu->hm.s.Event.fPending = false;     /* Clear the pending #PF; it is replaced with #DF below. */
8945 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
8946 Log(("#PF: Pending #DF due to vectoring #PF\n"));
8947 }
8948
8949 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
8950 return VINF_SUCCESS;
8951 }
8952
8953 TRPMResetTrap(pVCpu);
8954 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
8955 return rc;
8956}
8957