VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@52023

Last change on this file since 52023 was 52009, checked in by vboxsync, 10 years ago

Comments and nits.

1/* $Id: HMVMXR0.cpp 52009 2014-07-14 05:27:31Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/x86.h>
23#include <iprt/asm-amd64-x86.h>
24#include <iprt/thread.h>
25#include <iprt/string.h>
26
27#include "HMInternal.h"
28#include <VBox/vmm/vm.h>
29#include "HMVMXR0.h"
30#include <VBox/vmm/pdmapi.h>
31#include <VBox/vmm/dbgf.h>
32#include <VBox/vmm/iem.h>
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/selm.h>
35#include <VBox/vmm/tm.h>
36#include <VBox/vmm/gim.h>
37#ifdef VBOX_WITH_REM
38# include <VBox/vmm/rem.h>
39#endif
40#ifdef DEBUG_ramshankar
41# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
42# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
43# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
44# define HMVMX_ALWAYS_CHECK_GUEST_STATE
45# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
46# define HMVMX_ALWAYS_TRAP_PF
47# define HMVMX_ALWAYS_SWAP_FPU_STATE
48# define HMVMX_ALWAYS_FLUSH_TLB
49# define HMVMX_ALWAYS_SWAP_EFER
50#endif
51
52
53/*******************************************************************************
54* Defined Constants And Macros *
55*******************************************************************************/
56#if defined(RT_ARCH_AMD64)
57# define HMVMX_IS_64BIT_HOST_MODE() (true)
58typedef RTHCUINTREG HMVMXHCUINTREG;
59#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
60extern "C" uint32_t g_fVMXIs64bitHost;
61# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
62typedef uint64_t HMVMXHCUINTREG;
63#else
64# define HMVMX_IS_64BIT_HOST_MODE() (false)
65typedef RTHCUINTREG HMVMXHCUINTREG;
66#endif
67
68/** Use the function table. */
69#define HMVMX_USE_FUNCTION_TABLE
70
71/** Determine which tagged-TLB flush handler to use. */
72#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
73#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
74#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
75#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
76
77/** @name Updated-guest-state flags.
78 * @{ */
79#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
80#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
81#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
82#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
83#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
84#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
85#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
86#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
87#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
88#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
89#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
90#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
91#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12)
92#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13)
93#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14)
94#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15)
95#define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16)
96#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17)
97#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(18)
98#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
99 | HMVMX_UPDATED_GUEST_RSP \
100 | HMVMX_UPDATED_GUEST_RFLAGS \
101 | HMVMX_UPDATED_GUEST_CR0 \
102 | HMVMX_UPDATED_GUEST_CR3 \
103 | HMVMX_UPDATED_GUEST_CR4 \
104 | HMVMX_UPDATED_GUEST_GDTR \
105 | HMVMX_UPDATED_GUEST_IDTR \
106 | HMVMX_UPDATED_GUEST_LDTR \
107 | HMVMX_UPDATED_GUEST_TR \
108 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
109 | HMVMX_UPDATED_GUEST_DEBUG \
110 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
111 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
112 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
113 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
114 | HMVMX_UPDATED_GUEST_LAZY_MSRS \
115 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
116 | HMVMX_UPDATED_GUEST_APIC_STATE)
117/** @} */
118
119/** @name
120 * Flags to skip redundant reads of some common VMCS fields that are not part of
121 * the guest-CPU state but are in the transient structure.
122 * @{ */
123#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
124#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
125#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
126#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
127#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
128#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
129#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
130/** @} */
131
132/** @name
133 * States of the VMCS.
134 *
135 * This does not reflect all possible VMCS states but currently only those
136 * needed for maintaining the VMCS consistently even when thread-context hooks
137 * are used. Maybe later this can be extended (e.g. for nested virtualization).
138 * @{ */
139#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
140#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
141#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
142/** @} */
143
144/**
145 * Exception bitmap mask for real-mode guests (real-on-v86).
146 *
147 * We need to intercept all exceptions manually (except #PF). #NM is also
148 * handled separately, see hmR0VmxLoadSharedCR0(). #PF need not be intercepted
149 * even in real-mode if we have Nested Paging support.
150 */
151#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
152 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
153 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
154 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
155 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
156 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
157 | RT_BIT(X86_XCPT_XF))
158
159/**
160 * Exception bitmap mask for all contributory exceptions.
161 *
162 * Page fault is deliberately excluded here as it's conditional as to whether
163 * it's contributory or benign. Page faults are handled separately.
164 */
165#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
166 | RT_BIT(X86_XCPT_DE))
167
168/** Maximum VM-instruction error number. */
169#define HMVMX_INSTR_ERROR_MAX 28
170
171/** Profiling macro. */
172#ifdef HM_PROFILE_EXIT_DISPATCH
173# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
174# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
175#else
176# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
177# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
178#endif
179
180/** Assert that preemption is disabled or covered by thread-context hooks. */
181#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
182 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
183
184/** Assert that we haven't migrated CPUs when thread-context hooks are not
185 * used. */
186#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
187 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
188 ("Illegal migration! Entered on CPU %u Current %u\n", \
189 pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
190
191/** Helper macro for VM-exit handlers called unexpectedly. */
192#define HMVMX_RETURN_UNEXPECTED_EXIT() \
193 do { \
194 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
195 return VERR_VMX_UNEXPECTED_EXIT; \
196 } while (0)
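/*
 * Illustrative sketch: a VM-exit handler that should never be reached can use the
 * macro above to record the offending exit reason before failing. The function
 * name below is hypothetical; the parameter types follow the handler signature
 * used throughout this file. Kept under "#if 0" as illustration only.
 */
#if 0
static int hmR0VmxExitExampleUnexpected(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    NOREF(pMixedCtx);
    HMVMX_RETURN_UNEXPECTED_EXIT();   /* Records pVmxTransient->uExitReason in u32HMError, returns VERR_VMX_UNEXPECTED_EXIT. */
}
#endif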
197
198
199/*******************************************************************************
200* Structures and Typedefs *
201*******************************************************************************/
202/**
203 * VMX transient state.
204 *
205 * A state structure for holding miscellaneous information across
206 * VMX non-root operation and restoring it after the transition.
207 */
208typedef struct VMXTRANSIENT
209{
210 /** The host's rflags/eflags. */
211 RTCCUINTREG uEflags;
212#if HC_ARCH_BITS == 32
213 uint32_t u32Alignment0;
214#endif
215 /** The guest's TPR value used for TPR shadowing. */
216 uint8_t u8GuestTpr;
217 /** Alignment. */
218 uint8_t abAlignment0[7];
219
220 /** The basic VM-exit reason. */
221 uint16_t uExitReason;
222 /** Alignment. */
223 uint16_t u16Alignment0;
224 /** The VM-exit interruption error code. */
225 uint32_t uExitIntErrorCode;
226 /** The VM-exit exit qualification. */
227 uint64_t uExitQualification;
228
229 /** The VM-exit interruption-information field. */
230 uint32_t uExitIntInfo;
231 /** The VM-exit instruction-length field. */
232 uint32_t cbInstr;
233 /** The VM-exit instruction-information field. */
234 union
235 {
236 /** Plain unsigned int representation. */
237 uint32_t u;
238 /** INS and OUTS information. */
239 struct
240 {
241 uint32_t u6Reserved0 : 7;
242 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
243 uint32_t u3AddrSize : 3;
244 uint32_t u5Reserved1 : 5;
245 /** The segment register (X86_SREG_XXX). */
246 uint32_t iSegReg : 3;
247 uint32_t uReserved2 : 14;
248 } StrIo;
249 } ExitInstrInfo;
250 /** Whether the VM-entry failed or not. */
251 bool fVMEntryFailed;
252 /** Alignment. */
253 uint8_t abAlignment1[3];
254
255 /** The VM-entry interruption-information field. */
256 uint32_t uEntryIntInfo;
257 /** The VM-entry exception error code field. */
258 uint32_t uEntryXcptErrorCode;
259 /** The VM-entry instruction length field. */
260 uint32_t cbEntryInstr;
261
262 /** IDT-vectoring information field. */
263 uint32_t uIdtVectoringInfo;
264 /** IDT-vectoring error code. */
265 uint32_t uIdtVectoringErrorCode;
266
267 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
268 uint32_t fVmcsFieldsRead;
269
270 /** Whether the guest FPU was active at the time of VM-exit. */
271 bool fWasGuestFPUStateActive;
272 /** Whether the guest debug state was active at the time of VM-exit. */
273 bool fWasGuestDebugStateActive;
274 /** Whether the hyper debug state was active at the time of VM-exit. */
275 bool fWasHyperDebugStateActive;
276 /** Whether TSC-offsetting should be set up before VM-entry. */
277 bool fUpdateTscOffsettingAndPreemptTimer;
278 /** Whether the VM-exit was caused by a page-fault during delivery of a
279 * contributory exception or a page-fault. */
280 bool fVectoringPF;
281} VMXTRANSIENT;
282AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
283AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
284AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
285AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
286AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
287/** Pointer to VMX transient state. */
288typedef VMXTRANSIENT *PVMXTRANSIENT;
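/*
 * Illustrative sketch (assumption: the transient structure lives on the ring-0
 * stack and is re-initialized before each VM-entry so the lazy VMCS read helpers
 * below start with an empty fVmcsFieldsRead cache). RT_ZERO() is the IPRT
 * zero-initialization macro. Kept under "#if 0" as illustration only.
 */
#if 0
VMXTRANSIENT VmxTransient;
RT_ZERO(VmxTransient);                                    /* No VMCS fields cached yet. */
VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;  /* Force TSC-offsetting setup on the first entry. */
#endif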
289
290
291/**
292 * MSR-bitmap read permissions.
293 */
294typedef enum VMXMSREXITREAD
295{
296 /** Reading this MSR causes a VM-exit. */
297 VMXMSREXIT_INTERCEPT_READ = 0xb,
298 /** Reading this MSR does not cause a VM-exit. */
299 VMXMSREXIT_PASSTHRU_READ
300} VMXMSREXITREAD;
301/** Pointer to MSR-bitmap read permissions. */
302typedef VMXMSREXITREAD* PVMXMSREXITREAD;
303
304/**
305 * MSR-bitmap write permissions.
306 */
307typedef enum VMXMSREXITWRITE
308{
309 /** Writing to this MSR causes a VM-exit. */
310 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
311 /** Writing to this MSR does not cause a VM-exit. */
312 VMXMSREXIT_PASSTHRU_WRITE
313} VMXMSREXITWRITE;
314/** Pointer to MSR-bitmap write permissions. */
315typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
316
317
318/**
319 * VMX VM-exit handler.
320 *
321 * @returns VBox status code.
322 * @param pVCpu Pointer to the VMCPU.
323 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
324 * out-of-sync. Make sure to update the required
325 * fields before using them.
326 * @param pVmxTransient Pointer to the VMX-transient structure.
327 */
328#ifndef HMVMX_USE_FUNCTION_TABLE
329typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
330#else
331typedef DECLCALLBACK(int) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
332/** Pointer to VM-exit handler. */
333typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
334#endif
335
336
337/*******************************************************************************
338* Internal Functions *
339*******************************************************************************/
340static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
341static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
342static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
343 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntState);
344#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
345static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
346#endif
347#ifndef HMVMX_USE_FUNCTION_TABLE
348DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
349# define HMVMX_EXIT_DECL static int
350#else
351# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
352#endif
353
354/** @name VM-exit handlers.
355 * @{
356 */
357static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
358static FNVMXEXITHANDLER hmR0VmxExitExtInt;
359static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
360static FNVMXEXITHANDLER hmR0VmxExitInitSignal;
361static FNVMXEXITHANDLER hmR0VmxExitSipi;
362static FNVMXEXITHANDLER hmR0VmxExitIoSmi;
363static FNVMXEXITHANDLER hmR0VmxExitSmi;
364static FNVMXEXITHANDLER hmR0VmxExitIntWindow;
365static FNVMXEXITHANDLER hmR0VmxExitNmiWindow;
366static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
367static FNVMXEXITHANDLER hmR0VmxExitCpuid;
368static FNVMXEXITHANDLER hmR0VmxExitGetsec;
369static FNVMXEXITHANDLER hmR0VmxExitHlt;
370static FNVMXEXITHANDLER hmR0VmxExitInvd;
371static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
372static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
373static FNVMXEXITHANDLER hmR0VmxExitVmcall;
374static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
375static FNVMXEXITHANDLER hmR0VmxExitRsm;
376static FNVMXEXITHANDLER hmR0VmxExitSetPendingXcptUD;
377static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
378static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
379static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
380static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
381static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
382static FNVMXEXITHANDLER hmR0VmxExitErrInvalidGuestState;
383static FNVMXEXITHANDLER hmR0VmxExitErrMsrLoad;
384static FNVMXEXITHANDLER hmR0VmxExitErrUndefined;
385static FNVMXEXITHANDLER hmR0VmxExitMwait;
386static FNVMXEXITHANDLER hmR0VmxExitMtf;
387static FNVMXEXITHANDLER hmR0VmxExitMonitor;
388static FNVMXEXITHANDLER hmR0VmxExitPause;
389static FNVMXEXITHANDLER hmR0VmxExitErrMachineCheck;
390static FNVMXEXITHANDLER hmR0VmxExitTprBelowThreshold;
391static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
392static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
393static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
394static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
395static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
396static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
397static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
398static FNVMXEXITHANDLER hmR0VmxExitWbinvd;
399static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
400static FNVMXEXITHANDLER hmR0VmxExitRdrand;
401static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
402/** @} */
403
404static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
405static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
406static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
407static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
408static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
409static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
410#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
411static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
412#endif
413static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
414
415/*******************************************************************************
416* Global Variables *
417*******************************************************************************/
418#ifdef HMVMX_USE_FUNCTION_TABLE
419
420/**
421 * VMX_EXIT dispatch table.
422 */
423static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
424{
425 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
426 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
427 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
428 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
429 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
430 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
431 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
432 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
433 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
434 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
435 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
436 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
437 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
438 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
439 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
440 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
441 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
442 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
443 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
444 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
445 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
446 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
447 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
448 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
449 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
450 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
451 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
452 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
453 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
454 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
455 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
456 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
457 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
458 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
459 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
460 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
461 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
462 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
463 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
464 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
 465 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
 466 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
 467 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
468 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
469 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
470 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
471 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
472 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
473 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
474 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
475 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
476 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
477 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
478 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
479 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
480 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
481 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
482 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
483 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
484 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
485};
486#endif /* HMVMX_USE_FUNCTION_TABLE */
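/*
 * Illustrative sketch: how the dispatch table above is typically consumed after a
 * VM-exit when HMVMX_USE_FUNCTION_TABLE is defined. The pVCpu, pMixedCtx and
 * pVmxTransient variables follow the handler conventions of this file; the bounds
 * check mirrors the table size. Kept under "#if 0" as illustration only.
 */
#if 0
AssertReturn(pVmxTransient->uExitReason <= VMX_EXIT_MAX, VERR_VMX_UNEXPECTED_EXIT);
int rc = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
#endif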
487
488#ifdef VBOX_STRICT
489static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
490{
491 /* 0 */ "(Not Used)",
492 /* 1 */ "VMCALL executed in VMX root operation.",
493 /* 2 */ "VMCLEAR with invalid physical address.",
494 /* 3 */ "VMCLEAR with VMXON pointer.",
495 /* 4 */ "VMLAUNCH with non-clear VMCS.",
496 /* 5 */ "VMRESUME with non-launched VMCS.",
497 /* 6 */ "VMRESUME after VMXOFF.",
498 /* 7 */ "VM entry with invalid control fields.",
499 /* 8 */ "VM entry with invalid host state fields.",
500 /* 9 */ "VMPTRLD with invalid physical address.",
501 /* 10 */ "VMPTRLD with VMXON pointer.",
502 /* 11 */ "VMPTRLD with incorrect revision identifier.",
503 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
504 /* 13 */ "VMWRITE to read-only VMCS component.",
505 /* 14 */ "(Not Used)",
506 /* 15 */ "VMXON executed in VMX root operation.",
507 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
508 /* 17 */ "VM entry with non-launched executing VMCS.",
509 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
510 /* 19 */ "VMCALL with non-clear VMCS.",
511 /* 20 */ "VMCALL with invalid VM-exit control fields.",
512 /* 21 */ "(Not Used)",
513 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
514 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
515 /* 24 */ "VMCALL with invalid SMM-monitor features.",
516 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
517 /* 26 */ "VM entry with events blocked by MOV SS.",
518 /* 27 */ "(Not Used)",
519 /* 28 */ "Invalid operand to INVEPT/INVVPID."
520};
521#endif /* VBOX_STRICT */
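/*
 * Illustrative sketch: in strict builds the table above can translate the
 * VM-instruction error read from the VMCS into text. A minimal sketch; error
 * numbers beyond HMVMX_INSTR_ERROR_MAX are simply skipped. Kept under "#if 0"
 * as illustration only.
 */
#if 0
uint32_t uInstrError = 0;
VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &uInstrError);
if (uInstrError <= HMVMX_INSTR_ERROR_MAX)
    Log4(("VM-instruction error %u: %s\n", uInstrError, g_apszVmxInstrErrors[uInstrError]));
#endif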
522
523
524
525/**
526 * Updates the VM's last error record. If there was a VMX instruction error,
527 * reads the error data from the VMCS and updates the VCPU's last error record as
528 * well.
529 *
530 * @param pVM Pointer to the VM.
531 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
532 * VERR_VMX_UNABLE_TO_START_VM or
533 * VERR_VMX_INVALID_VMCS_FIELD).
534 * @param rc The error code.
535 */
536static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
537{
538 AssertPtr(pVM);
539 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
540 || rc == VERR_VMX_UNABLE_TO_START_VM)
541 {
542 AssertPtrReturnVoid(pVCpu);
543 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
544 }
545 pVM->hm.s.lLastError = rc;
546}
547
548
549/**
550 * Reads the VM-entry interruption-information field from the VMCS into the VMX
551 * transient structure.
552 *
553 * @returns VBox status code.
554 * @param pVmxTransient Pointer to the VMX transient structure.
555 *
556 * @remarks No-long-jump zone!!!
557 */
558DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
559{
560 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
561 AssertRCReturn(rc, rc);
562 return VINF_SUCCESS;
563}
564
565
566/**
567 * Reads the VM-entry exception error code field from the VMCS into
568 * the VMX transient structure.
569 *
570 * @returns VBox status code.
571 * @param pVmxTransient Pointer to the VMX transient structure.
572 *
573 * @remarks No-long-jump zone!!!
574 */
575DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
576{
577 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
578 AssertRCReturn(rc, rc);
579 return VINF_SUCCESS;
580}
581
582
583/**
584 * Reads the VM-entry instruction-length field from the VMCS into the
585 * VMX transient structure.
586 *
587 * @returns VBox status code.
588 * @param pVmxTransient Pointer to the VMX transient structure.
589 *
590 * @remarks No-long-jump zone!!!
591 */
592DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
593{
594 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
595 AssertRCReturn(rc, rc);
596 return VINF_SUCCESS;
597}
598
599
600/**
601 * Reads the VM-exit interruption-information field from the VMCS into the VMX
602 * transient structure.
603 *
604 * @returns VBox status code.
605 * @param pVmxTransient Pointer to the VMX transient structure.
606 */
607DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
608{
609 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
610 {
611 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
612 AssertRCReturn(rc, rc);
613 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
614 }
615 return VINF_SUCCESS;
616}
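/*
 * Illustrative sketch: because each read helper records what it fetched in
 * fVmcsFieldsRead, calling it again from another code path of the same VM-exit
 * costs no additional VMREAD. pVmxTransient follows the conventions of the
 * surrounding code. Kept under "#if 0" as illustration only.
 */
#if 0
int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);   /* First call performs the VMREAD. */
rc     = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);   /* Cached; returns immediately without a VMREAD. */
AssertRC(rc);
#endif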
617
618
619/**
620 * Reads the VM-exit interruption error code from the VMCS into the VMX
621 * transient structure.
622 *
623 * @returns VBox status code.
624 * @param pVmxTransient Pointer to the VMX transient structure.
625 */
626DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
627{
628 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
629 {
630 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
631 AssertRCReturn(rc, rc);
632 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
633 }
634 return VINF_SUCCESS;
635}
636
637
638/**
639 * Reads the VM-exit instruction length field from the VMCS into the VMX
640 * transient structure.
641 *
642 * @returns VBox status code.
644 * @param pVmxTransient Pointer to the VMX transient structure.
645 */
646DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
647{
648 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
649 {
650 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
651 AssertRCReturn(rc, rc);
652 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
653 }
654 return VINF_SUCCESS;
655}
656
657
658/**
659 * Reads the VM-exit instruction-information field from the VMCS into
660 * the VMX transient structure.
661 *
662 * @returns VBox status code.
663 * @param pVmxTransient Pointer to the VMX transient structure.
664 */
665DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
666{
667 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
668 {
669 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
670 AssertRCReturn(rc, rc);
671 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
672 }
673 return VINF_SUCCESS;
674}
675
676
677/**
678 * Reads the exit qualification from the VMCS into the VMX transient structure.
679 *
680 * @returns VBox status code.
681 * @param pVCpu Pointer to the VMCPU (required for the VMCS cache
682 * case).
683 * @param pVmxTransient Pointer to the VMX transient structure.
684 */
685DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
686{
687 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
688 {
689 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
690 AssertRCReturn(rc, rc);
691 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
692 }
693 return VINF_SUCCESS;
694}
695
696
697/**
698 * Reads the IDT-vectoring information field from the VMCS into the VMX
699 * transient structure.
700 *
701 * @returns VBox status code.
702 * @param pVmxTransient Pointer to the VMX transient structure.
703 *
704 * @remarks No-long-jump zone!!!
705 */
706DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
707{
708 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
709 {
710 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
711 AssertRCReturn(rc, rc);
712 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
713 }
714 return VINF_SUCCESS;
715}
716
717
718/**
719 * Reads the IDT-vectoring error code from the VMCS into the VMX
720 * transient structure.
721 *
722 * @returns VBox status code.
723 * @param pVmxTransient Pointer to the VMX transient structure.
724 */
725DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
726{
727 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
728 {
729 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
730 AssertRCReturn(rc, rc);
731 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
732 }
733 return VINF_SUCCESS;
734}
735
736
737/**
738 * Enters VMX root mode operation on the current CPU.
739 *
740 * @returns VBox status code.
741 * @param pVM Pointer to the VM (optional, can be NULL, after
742 * a resume).
743 * @param HCPhysCpuPage Physical address of the VMXON region.
744 * @param pvCpuPage Pointer to the VMXON region.
745 */
746static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
747{
748 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
749 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
750 Assert(pvCpuPage);
751 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
752
753 if (pVM)
754 {
755 /* Write the VMCS revision dword to the VMXON region. */
756 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
757 }
758
759 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
760 RTCCUINTREG uEflags = ASMIntDisableFlags();
761
762 /* Enable the VMX bit in CR4 if necessary. */
763 RTCCUINTREG uCr4 = ASMGetCR4();
764 if (!(uCr4 & X86_CR4_VMXE))
765 ASMSetCR4(uCr4 | X86_CR4_VMXE);
766
767 /* Enter VMX root mode. */
768 int rc = VMXEnable(HCPhysCpuPage);
769 if (RT_FAILURE(rc))
770 ASMSetCR4(uCr4);
771
772 /* Restore interrupts. */
773 ASMSetFlags(uEflags);
774 return rc;
775}
776
777
778/**
779 * Exits VMX root mode operation on the current CPU.
780 *
781 * @returns VBox status code.
782 */
783static int hmR0VmxLeaveRootMode(void)
784{
785 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
786
787 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
788 RTCCUINTREG uEflags = ASMIntDisableFlags();
789
790 /* If we're for some reason not in VMX root mode, then don't leave it. */
791 RTCCUINTREG uHostCR4 = ASMGetCR4();
792
793 int rc;
794 if (uHostCR4 & X86_CR4_VMXE)
795 {
796 /* Exit VMX root mode and clear the VMX bit in CR4. */
797 VMXDisable();
798 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
799 rc = VINF_SUCCESS;
800 }
801 else
802 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
803
804 /* Restore interrupts. */
805 ASMSetFlags(uEflags);
806 return rc;
807}
808
809
810/**
811 * Allocates and maps one physically contiguous page. The allocated page is
812 * zeroed out (used by various VT-x structures).
813 *
814 * @returns IPRT status code.
815 * @param pMemObj Pointer to the ring-0 memory object.
816 * @param ppVirt Where to store the virtual address of the
817 * allocation.
818 * @param pHCPhys Where to store the physical address of the
819 * allocation.
820 */
821DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
822{
823 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
824 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
825 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
826
827 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
828 if (RT_FAILURE(rc))
829 return rc;
830 *ppVirt = RTR0MemObjAddress(*pMemObj);
831 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
832 ASMMemZero32(*ppVirt, PAGE_SIZE);
833 return VINF_SUCCESS;
834}
835
836
837/**
838 * Frees and unmaps an allocated physical page.
839 *
840 * @param pMemObj Pointer to the ring-0 memory object.
841 * @param ppVirt Where to re-initialize the virtual address of the
842 * allocation (set to 0).
843 * @param pHCPhys Where to re-initialize the physical address of the
844 * allocation (set to 0).
845 */
846DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
847{
848 AssertPtr(pMemObj);
849 AssertPtr(ppVirt);
850 AssertPtr(pHCPhys);
851 if (*pMemObj != NIL_RTR0MEMOBJ)
852 {
853 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
854 AssertRC(rc);
855 *pMemObj = NIL_RTR0MEMOBJ;
856 *ppVirt = 0;
857 *pHCPhys = 0;
858 }
859}
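/*
 * Illustrative sketch: the allocation and free helpers above are always used as a
 * pair on the same triplet of ring-0 memory object, ring-0 pointer and
 * host-physical address. The variable names here are hypothetical. Kept under
 * "#if 0" as illustration only.
 */
#if 0
RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
RTR0PTR    pvPage  = 0;
RTHCPHYS   HCPhys  = 0;
int rc = hmR0VmxPageAllocZ(&hMemObj, &pvPage, &HCPhys);
if (RT_SUCCESS(rc))
{
    /* ... use the zeroed, physically contiguous page ... */
    hmR0VmxPageFree(&hMemObj, &pvPage, &HCPhys);
}
#endif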
860
861
862/**
863 * Worker function to free VT-x related structures.
864 *
865 * @returns IPRT status code.
866 * @param pVM Pointer to the VM.
867 */
868static void hmR0VmxStructsFree(PVM pVM)
869{
870 for (VMCPUID i = 0; i < pVM->cCpus; i++)
871 {
872 PVMCPU pVCpu = &pVM->aCpus[i];
873 AssertPtr(pVCpu);
874
875 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
876 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
877
878 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
879 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
880
881 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
882 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
883 }
884
885 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
886#ifdef VBOX_WITH_CRASHDUMP_MAGIC
887 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
888#endif
889}
890
891
892/**
893 * Worker function to allocate VT-x related VM structures.
894 *
895 * @returns IPRT status code.
896 * @param pVM Pointer to the VM.
897 */
898static int hmR0VmxStructsAlloc(PVM pVM)
899{
900 /*
901 * Initialize members up-front so we can cleanup properly on allocation failure.
902 */
903#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
904 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
905 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
906 pVM->hm.s.vmx.HCPhys##a_Name = 0;
907
908#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
909 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
910 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
911 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
912
913#ifdef VBOX_WITH_CRASHDUMP_MAGIC
914 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
915#endif
916 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
917
918 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
919 for (VMCPUID i = 0; i < pVM->cCpus; i++)
920 {
921 PVMCPU pVCpu = &pVM->aCpus[i];
922 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
923 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
924 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
925 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
926 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
927 }
928#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
929#undef VMXLOCAL_INIT_VM_MEMOBJ
930
931 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
932 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
933 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
934 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
935
936 /*
937 * Allocate all the VT-x structures.
938 */
939 int rc = VINF_SUCCESS;
940#ifdef VBOX_WITH_CRASHDUMP_MAGIC
941 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
942 if (RT_FAILURE(rc))
943 goto cleanup;
944 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
945 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
946#endif
947
948 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
949 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
950 {
951 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
952 &pVM->hm.s.vmx.HCPhysApicAccess);
953 if (RT_FAILURE(rc))
954 goto cleanup;
955 }
956
957 /*
958 * Initialize per-VCPU VT-x structures.
959 */
960 for (VMCPUID i = 0; i < pVM->cCpus; i++)
961 {
962 PVMCPU pVCpu = &pVM->aCpus[i];
963 AssertPtr(pVCpu);
964
965 /* Allocate the VM control structure (VMCS). */
966 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
967 if (RT_FAILURE(rc))
968 goto cleanup;
969
970 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
971 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
972 {
973 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
974 &pVCpu->hm.s.vmx.HCPhysVirtApic);
975 if (RT_FAILURE(rc))
976 goto cleanup;
977 }
978
979 /*
980 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
981 * transparent accesses of specific MSRs.
982 *
983 * If the condition for enabling MSR bitmaps changes here, don't forget to
984 * update HMIsMsrBitmapsAvailable().
985 */
986 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
987 {
988 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
989 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
990 if (RT_FAILURE(rc))
991 goto cleanup;
992 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
993 }
994
995 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
996 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
997 if (RT_FAILURE(rc))
998 goto cleanup;
999
1000 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1001 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1002 if (RT_FAILURE(rc))
1003 goto cleanup;
1004 }
1005
1006 return VINF_SUCCESS;
1007
1008cleanup:
1009 hmR0VmxStructsFree(pVM);
1010 return rc;
1011}
1012
1013
1014/**
1015 * Does global VT-x initialization (called during module initialization).
1016 *
1017 * @returns VBox status code.
1018 */
1019VMMR0DECL(int) VMXR0GlobalInit(void)
1020{
1021#ifdef HMVMX_USE_FUNCTION_TABLE
1022 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1023# ifdef VBOX_STRICT
1024 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1025 Assert(g_apfnVMExitHandlers[i]);
1026# endif
1027#endif
1028 return VINF_SUCCESS;
1029}
1030
1031
1032/**
1033 * Does global VT-x termination (called during module termination).
1034 */
1035VMMR0DECL(void) VMXR0GlobalTerm()
1036{
1037 /* Nothing to do currently. */
1038}
1039
1040
1041/**
1042 * Sets up and activates VT-x on the current CPU.
1043 *
1044 * @returns VBox status code.
1045 * @param pCpu Pointer to the global CPU info struct.
1046 * @param pVM Pointer to the VM (can be NULL after a host resume
1047 * operation).
1048 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1049 * fEnabledByHost is true).
1050 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1051 * @a fEnabledByHost is true).
1052 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1053 * enable VT-x on the host.
1054 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1055 */
1056VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1057 void *pvMsrs)
1058{
1059 Assert(pCpu);
1060 Assert(pvMsrs);
1061 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1062
1063 /* Enable VT-x if it's not already enabled by the host. */
1064 if (!fEnabledByHost)
1065 {
1066 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1067 if (RT_FAILURE(rc))
1068 return rc;
1069 }
1070
1071 /*
1072 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
1073 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
1074 */
1075 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1076 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1077 {
1078 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1079 pCpu->fFlushAsidBeforeUse = false;
1080 }
1081 else
1082 pCpu->fFlushAsidBeforeUse = true;
1083
1084 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1085 ++pCpu->cTlbFlushes;
1086
1087 return VINF_SUCCESS;
1088}
1089
1090
1091/**
1092 * Deactivates VT-x on the current CPU.
1093 *
1094 * @returns VBox status code.
1095 * @param pCpu Pointer to the global CPU info struct.
1096 * @param pvCpuPage Pointer to the VMXON region.
1097 * @param HCPhysCpuPage Physical address of the VMXON region.
1098 *
1099 * @remarks This function should never be called when SUPR0EnableVTx() or
1100 * similar was used to enable VT-x on the host.
1101 */
1102VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1103{
1104 NOREF(pCpu);
1105 NOREF(pvCpuPage);
1106 NOREF(HCPhysCpuPage);
1107
1108 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1109 return hmR0VmxLeaveRootMode();
1110}
1111
1112
1113/**
1114 * Sets the permission bits for the specified MSR in the MSR bitmap.
1115 *
1116 * @param pVCpu Pointer to the VMCPU.
1117 * @param uMsr The MSR value.
1118 * @param enmRead Whether reading this MSR causes a VM-exit.
1119 * @param enmWrite Whether writing this MSR causes a VM-exit.
1120 */
1121static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1122{
1123 int32_t iBit;
1124 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1125
1126 /*
1127 * Layout:
1128 * 0x000 - 0x3ff - Low MSR read bits
1129 * 0x400 - 0x7ff - High MSR read bits
1130 * 0x800 - 0xbff - Low MSR write bits
1131 * 0xc00 - 0xfff - High MSR write bits
1132 */
1133 if (uMsr <= 0x00001FFF)
1134 iBit = uMsr;
1135 else if ( uMsr >= 0xC0000000
1136 && uMsr <= 0xC0001FFF)
1137 {
1138 iBit = (uMsr - 0xC0000000);
1139 pbMsrBitmap += 0x400;
1140 }
1141 else
1142 {
1143 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1144 return;
1145 }
1146
1147 Assert(iBit <= 0x1fff);
1148 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1149 ASMBitSet(pbMsrBitmap, iBit);
1150 else
1151 ASMBitClear(pbMsrBitmap, iBit);
1152
1153 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1154 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1155 else
1156 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1157}
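/*
 * Illustrative sketch: how the bitmap layout above maps a concrete MSR.
 * MSR_K8_LSTAR (0xC0000082) falls into the "high" range, so iBit becomes 0x82,
 * its read bit lives at offset 0x400 and its write bit at 0x400 + 0x800 = 0xC00
 * from the start of the bitmap. pVCpu follows the conventions of the surrounding
 * code. Kept under "#if 0" as illustration only.
 */
#if 0
hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
#endif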
1158
1159
1160#ifdef VBOX_STRICT
1161/**
1162 * Gets the permission bits for the specified MSR in the MSR bitmap.
1163 *
1164 * @returns VBox status code.
1165 * @retval VINF_SUCCESS if the specified MSR is found.
1166 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1167 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1168 *
1169 * @param pVCpu Pointer to the VMCPU.
1170 * @param uMsr The MSR.
1171 * @param penmRead Where to store the read permissions.
1172 * @param penmWrite Where to store the write permissions.
1173 */
1174static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1175{
1176 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1177 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1178 int32_t iBit;
1179 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1180
1181 /* See hmR0VmxSetMsrPermission() for the layout. */
1182 if (uMsr <= 0x00001FFF)
1183 iBit = uMsr;
1184 else if ( uMsr >= 0xC0000000
1185 && uMsr <= 0xC0001FFF)
1186 {
1187 iBit = (uMsr - 0xC0000000);
1188 pbMsrBitmap += 0x400;
1189 }
1190 else
1191 {
1192 AssertMsgFailed(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1193 return VERR_NOT_SUPPORTED;
1194 }
1195
1196 Assert(iBit <= 0x1fff);
1197 if (ASMBitTest(pbMsrBitmap, iBit))
1198 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1199 else
1200 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1201
1202 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1203 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1204 else
1205 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1206 return VINF_SUCCESS;
1207}
1208#endif /* VBOX_STRICT */
1209
1210
1211/**
1212 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1213 * area.
1214 *
1215 * @returns VBox status code.
1216 * @param pVCpu Pointer to the VMCPU.
1217 * @param cMsrs The number of MSRs.
1218 */
1219DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1220{
1221 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1222 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1223 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1224 {
1225 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1226 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1227 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1228 }
1229
1230 /* Update number of guest MSRs to load/store across the world-switch. */
1231 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1232 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRCReturn(rc, rc);
1233
1234 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1235 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1236
1237 /* Update the VCPU's copy of the MSR count. */
1238 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1239
1240 return VINF_SUCCESS;
1241}
1242
1243
1244/**
1245 * Adds a new (or updates the value of an existing) guest/host MSR
1246 * pair to be swapped during the world-switch as part of the
1247 * auto-load/store MSR area in the VMCS.
1248 *
1249 * @returns true if the MSR was added -and- its value was updated, false
1250 * otherwise.
1251 * @param pVCpu Pointer to the VMCPU.
1252 * @param uMsr The MSR.
1253 * @param uGuestMsr Value of the guest MSR.
1254 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1255 * necessary.
1256 */
1257static bool hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr)
1258{
1259 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1260 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1261 uint32_t i;
1262 for (i = 0; i < cMsrs; i++)
1263 {
1264 if (pGuestMsr->u32Msr == uMsr)
1265 break;
1266 pGuestMsr++;
1267 }
1268
1269 bool fAdded = false;
1270 if (i == cMsrs)
1271 {
1272 ++cMsrs;
1273 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1274 AssertRC(rc);
1275
1276 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1277 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1278 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1279
1280 fAdded = true;
1281 }
1282
1283 /* Update the MSR values in the auto-load/store MSR area. */
1284 pGuestMsr->u32Msr = uMsr;
1285 pGuestMsr->u64Value = uGuestMsrValue;
1286
1287 /* Create/update the MSR slot in the host MSR area. */
1288 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1289 pHostMsr += i;
1290 pHostMsr->u32Msr = uMsr;
1291
1292 /*
1293 * Update the host MSR only when requested by the caller AND when we're
1294 * adding it to the auto-load/store area. Otherwise, it would have been
1295 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
1296 */
1297 bool fUpdatedMsrValue = false;
1298 if ( fAdded
1299 && fUpdateHostMsr)
1300 {
1301 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1302 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1303 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1304 fUpdatedMsrValue = true;
1305 }
1306
1307 return fUpdatedMsrValue;
1308}
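/*
 * Illustrative sketch: adding an MSR pair such as TSC_AUX to the auto-load/store
 * area. MSR_K8_TSC_AUX is assumed to come from the VBox MSR headers and the guest
 * value placeholder would normally be obtained from CPUM. Passing false for
 * fUpdateHostMsr defers reading the host value to
 * hmR0VmxUpdateAutoLoadStoreHostMsrs(). Kept under "#if 0" as illustration only.
 */
#if 0
uint64_t uGuestTscAux = 0; /* Placeholder for the guest MSR value. */
hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, uGuestTscAux, false /* fUpdateHostMsr */);
#endif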
1309
1310
1311/**
1312 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1313 * auto-load/store MSR area in the VMCS.
1314 *
1315 * @returns VBox status code.
1316 * @param pVCpu Pointer to the VMCPU.
1317 * @param uMsr The MSR.
1318 */
1319static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1320{
1321 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1322 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1323 for (uint32_t i = 0; i < cMsrs; i++)
1324 {
1325 /* Find the MSR. */
1326 if (pGuestMsr->u32Msr == uMsr)
1327 {
1328 /* If it's the last MSR, simply reduce the count. */
1329 if (i == cMsrs - 1)
1330 {
1331 --cMsrs;
1332 break;
1333 }
1334
1335 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1336 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1337 pLastGuestMsr += cMsrs - 1;
1338 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1339 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1340
1341 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1342 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1343 pLastHostMsr += cMsrs - 1;
1344 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1345 pHostMsr->u64Value = pLastHostMsr->u64Value;
1346 --cMsrs;
1347 break;
1348 }
1349 pGuestMsr++;
1350 }
1351
1352 /* Update the VMCS if the count changed (meaning the MSR was found). */
1353 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1354 {
1355 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1356 AssertRCReturn(rc, rc);
1357
1358 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1359 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1360 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1361
1362 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1363 return VINF_SUCCESS;
1364 }
1365
1366 return VERR_NOT_FOUND;
1367}
1368
1369
1370/**
1371 * Checks if the specified guest MSR is part of the auto-load/store area in
1372 * the VMCS.
1373 *
1374 * @returns true if found, false otherwise.
1375 * @param pVCpu Pointer to the VMCPU.
1376 * @param uMsr The MSR to find.
1377 */
1378static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1379{
1380 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1381 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1382
1383 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1384 {
1385 if (pGuestMsr->u32Msr == uMsr)
1386 return true;
1387 }
1388 return false;
1389}
1390
1391
1392/**
1393 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1394 *
1395 * @param pVCpu Pointer to the VMCPU.
1396 *
1397 * @remarks No-long-jump zone!!!
1398 */
1399static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1400{
1401 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1402 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1403 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1404 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1405
1406 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1407 {
1408 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1409
1410 /*
1411 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1412 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1413 */
1414 if (pHostMsr->u32Msr == MSR_K6_EFER)
1415 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1416 else
1417 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1418 }
1419
1420 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1421}
1422
1423
1424#if HC_ARCH_BITS == 64
1425/**
1426 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1427 * perform lazy restoration of the host MSRs while leaving VT-x.
1428 *
1429 * @param pVCpu Pointer to the VMCPU.
1430 *
1431 * @remarks No-long-jump zone!!!
1432 */
1433static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1434{
1435 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1436
1437 /*
1438 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1439 */
1440 if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
1441 {
1442 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1443 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1444 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1445 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1446 pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_SAVED_HOST;
1447 }
1448}
1449
1450
1451/**
1452 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1453 * lazily while leaving VT-x.
1454 *
1455 * @returns true if it does, false otherwise.
1456 * @param pVCpu Pointer to the VMCPU.
1457 * @param uMsr The MSR to check.
1458 */
1459static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1460{
1461 NOREF(pVCpu);
1462 switch (uMsr)
1463 {
1464 case MSR_K8_LSTAR:
1465 case MSR_K6_STAR:
1466 case MSR_K8_SF_MASK:
1467 case MSR_K8_KERNEL_GS_BASE:
1468 return true;
1469 }
1470 return false;
1471}
1472
1473
1474/**
1475 * Saves a set of guest MSRs back into the guest-CPU context.
1476 *
1477 * @param pVCpu Pointer to the VMCPU.
1478 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1479 * out-of-sync. Make sure to update the required fields
1480 * before using them.
1481 *
1482 * @remarks No-long-jump zone!!!
1483 */
1484static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1485{
1486 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1487 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1488
1489 if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
1490 {
1491 Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
1492 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
1493 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
1494 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
1495 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1496 }
1497}
1498
1499
1500/**
1501 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1502 *
1503 * The name of this function is slightly confusing. This function does NOT
1504 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1505 * common prefix for functions dealing with "lazy restoration" of the shared
1506 * MSRs.
1507 *
1508 * @param pVCpu Pointer to the VMCPU.
1509 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1510 * out-of-sync. Make sure to update the required fields
1511 * before using them.
1512 *
1513 * @remarks No-long-jump zone!!!
1514 */
1515static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1516{
1517 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1518 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1519
1520 Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
1521 if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
1522 {
1523#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
1524 do { \
1525 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1526 ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
1527 else \
1528 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1529 } while (0)
1530
1531 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
1532 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
1533 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
1534 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
1535#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
1536 }
1537 else
1538 {
1539 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1540 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1541 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1542 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1543 }
1544 pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_LOADED_GUEST;
1545}
1546
1547
1548/**
1549 * Performs lazy restoration of the set of host MSRs if they were previously
1550 * loaded with guest MSR values.
1551 *
1552 * @param pVCpu Pointer to the VMCPU.
1553 *
1554 * @remarks No-long-jump zone!!!
1555 * @remarks The guest MSRs should have been saved back into the guest-CPU
1556 * context by hmR0VmxLazySaveGuestMsrs()!!!
1557 */
1558static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1559{
1560 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1561 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1562
1563 if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
1564 {
1565 Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
1566 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1567 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1568 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1569 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1570 }
1571 pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(VMX_RESTORE_HOST_MSR_LOADED_GUEST | VMX_RESTORE_HOST_MSR_SAVED_HOST);
1572}
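
/*
 * Illustrative flow of the lazy MSR handling above (a sketch, not called code):
 *
 *   1. Host values cached, VMX_RESTORE_HOST_MSR_SAVED_HOST set (see above).
 *   2. hmR0VmxLazyLoadGuestMsrs()   - guest values loaded into the MSRs,
 *                                     VMX_RESTORE_HOST_MSR_LOADED_GUEST set.
 *   3. ... guest code runs, possibly across many VM-entries/VM-exits ...
 *   4. hmR0VmxLazySaveGuestMsrs()   - guest values read back into pMixedCtx.
 *   5. hmR0VmxLazyRestoreHostMsrs() - host values restored, both flags cleared.
 */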
1573#endif /* HC_ARCH_BITS == 64 */
1574
1575
1576/**
1577 * Verifies that our cached values of the VMCS controls are all
1578 * consistent with what's actually present in the VMCS.
1579 *
1580 * @returns VBox status code.
1581 * @param pVCpu Pointer to the VMCPU.
1582 */
1583static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1584{
1585 uint32_t u32Val;
1586 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1587 AssertRCReturn(rc, rc);
1588 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1589 VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
1590
1591 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1592 AssertRCReturn(rc, rc);
1593 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1594 VERR_VMX_EXIT_CTLS_CACHE_INVALID);
1595
1596 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1597 AssertRCReturn(rc, rc);
1598 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1599 VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
1600
1601 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1602 AssertRCReturn(rc, rc);
1603 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1604 VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
1605
1606 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1607 AssertRCReturn(rc, rc);
1608 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1609 VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
1610
1611 return VINF_SUCCESS;
1612}
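
/*
 * Note: a failure in hmR0VmxCheckVmcsCtls() means some code path updated one of
 * the entry/exit/pin/processor-based controls in the VMCS via VMXWriteVmcs32()
 * without refreshing the corresponding pVCpu->hm.s.vmx.u32*Ctls cache, or
 * updated the cache without writing the VMCS.
 */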
1613
1614
1615#ifdef VBOX_STRICT
1616/**
1617 * Verifies that our cached host EFER value has not changed
1618 * since we cached it.
1619 *
1620 * @param pVCpu Pointer to the VMCPU.
1621 */
1622static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1623{
1624 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1625
1626 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1627 {
1628 uint64_t u64Val;
1629 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
1630 AssertRC(rc);
1631
1632 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1633 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1634 }
1635}
1636
1637
1638/**
1639 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1640 * VMCS are correct.
1641 *
1642 * @param pVCpu Pointer to the VMCPU.
1643 */
1644static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1645{
1646 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1647
1648 /* Verify MSR counts in the VMCS are what we think it should be. */
1649 uint32_t cMsrs;
1650 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1651 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1652
1653 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1654 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1655
1656 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1657 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1658
1659 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1660 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1661 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1662 {
1663 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1664 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1665 pGuestMsr->u32Msr, cMsrs));
1666
1667 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1668 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1669 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1670
1671 /* Verify that the permissions are as expected in the MSR bitmap. */
1672 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1673 {
1674 VMXMSREXITREAD enmRead;
1675 VMXMSREXITWRITE enmWrite;
1676 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1677 AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1678 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1679 {
1680 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1681 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1682 }
1683 else
1684 {
1685 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1686 pGuestMsr->u32Msr, cMsrs));
1687 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1688 pGuestMsr->u32Msr, cMsrs));
1689 }
1690 }
1691 }
1692}
1693#endif /* VBOX_STRICT */
1694
1695
1696/**
1697 * Flushes the TLB using EPT.
1698 *
1700 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1701 * enmFlush).
1702 * @param enmFlush Type of flush.
1703 *
1704 * @remarks Caller is responsible for making sure this function is called only
1705 * when NestedPaging is supported and providing @a enmFlush that is
1706 * supported by the CPU.
1707 * @remarks Can be called with interrupts disabled.
1708 */
1709static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1710{
1711 uint64_t au64Descriptor[2];
1712 if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
1713 au64Descriptor[0] = 0;
1714 else
1715 {
1716 Assert(pVCpu);
1717 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1718 }
1719 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1720
1721 int rc = VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
1722 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1723 rc));
1724 if ( RT_SUCCESS(rc)
1725 && pVCpu)
1726 {
1727 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1728 }
1729}
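
/*
 * For reference, the INVEPT descriptor built above is 128 bits wide:
 *   qword 0: the EPT pointer (not used by the CPU for the all-contexts type),
 *   qword 1: reserved, must be zero.
 * See Intel spec. "INVEPT - Invalidate Translations Derived from EPT".
 */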
1730
1731
1732/**
1733 * Flushes the TLB using VPID.
1734 *
1736 * @param pVM Pointer to the VM.
1737 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1738 * enmFlush).
1739 * @param enmFlush Type of flush.
1740 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1741 * on @a enmFlush).
1742 *
1743 * @remarks Can be called with interrupts disabled.
1744 */
1745static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1746{
1747 NOREF(pVM);
1748 AssertPtr(pVM);
1749 Assert(pVM->hm.s.vmx.fVpid);
1750
1751 uint64_t au64Descriptor[2];
1752 if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
1753 {
1754 au64Descriptor[0] = 0;
1755 au64Descriptor[1] = 0;
1756 }
1757 else
1758 {
1759 AssertPtr(pVCpu);
1760 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1761 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1762 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1763 au64Descriptor[1] = GCPtr;
1764 }
1765
1766 int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
1767 AssertMsg(rc == VINF_SUCCESS,
1768 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1769 if ( RT_SUCCESS(rc)
1770 && pVCpu)
1771 {
1772 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1773 }
1774}
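
/*
 * For reference, the INVVPID descriptor built above is 128 bits wide:
 *   qword 0: bits 15:0 hold the VPID, bits 63:16 are reserved (zero),
 *   qword 1: the linear address (used only for the individual-address type).
 * See Intel spec. "INVVPID - Invalidate Translations Based on VPID".
 */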
1775
1776
1777/**
1778 * Invalidates a guest page by guest virtual address. Only relevant for
1779 * EPT/VPID, otherwise there is nothing really to invalidate.
1780 *
1781 * @returns VBox status code.
1782 * @param pVM Pointer to the VM.
1783 * @param pVCpu Pointer to the VMCPU.
1784 * @param GCVirt Guest virtual address of the page to invalidate.
1785 */
1786VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1787{
1788 AssertPtr(pVM);
1789 AssertPtr(pVCpu);
1790 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1791
1792 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1793 if (!fFlushPending)
1794 {
1795 /*
1796 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1797 * See @bugref{6043} and @bugref{6177}.
1798 *
1799 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTaggedTlb*() as this
1800 * function may be called in a loop with individual addresses.
1801 */
1802 if (pVM->hm.s.vmx.fVpid)
1803 {
1804 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1805 {
1806 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1807 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1808 }
1809 else
1810 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1811 }
1812 else if (pVM->hm.s.fNestedPaging)
1813 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1814 }
1815
1816 return VINF_SUCCESS;
1817}
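
/*
 * Summary of the invalidation strategy above (informal):
 *   VPID with individual-address INVVPID support -> flush just that address now.
 *   VPID without individual-address support      -> defer; full flush at VM-entry.
 *   Nested paging only (no VPID)                 -> defer; flush-by-EPT at VM-entry.
 *   Neither EPT nor VPID                         -> nothing to invalidate here.
 */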
1818
1819
1820/**
1821 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1822 * otherwise there is nothing really to invalidate.
1823 *
1824 * @returns VBox status code.
1825 * @param pVM Pointer to the VM.
1826 * @param pVCpu Pointer to the VMCPU.
1827 * @param GCPhys Guest physical address of the page to invalidate.
1828 */
1829VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1830{
1831 NOREF(pVM); NOREF(GCPhys);
1832 LogFlowFunc(("%RGp\n", GCPhys));
1833
1834 /*
1835 * We cannot flush a page by guest-physical address: INVVPID takes only a linear address, while INVEPT flushes
1836 * entire EPT contexts rather than individual addresses. We update the force flag here and flush before the next
1837 * VM-entry in hmR0VmxFlushTaggedTlb*() since this function might be called in a loop. This causes a flush-by-EPT if EPT is in use. See @bugref{6568}.
1838 */
1839 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1840 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1841 return VINF_SUCCESS;
1842}
1843
1844
1845/**
1846 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1847 * case where neither EPT nor VPID is supported by the CPU.
1848 *
1849 * @param pVM Pointer to the VM.
1850 * @param pVCpu Pointer to the VMCPU.
1851 * @param pCpu Pointer to the global HM struct.
1852 *
1853 * @remarks Called with interrupts disabled.
1854 */
1855static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1856{
1857 AssertPtr(pVCpu);
1858 AssertPtr(pCpu);
1859 NOREF(pVM);
1860
1861 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1862
1863 /** @todo TLB shootdown is currently not used. See hmQueueInvlPage(). */
1864#if 0
1865 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1866 pVCpu->hm.s.TlbShootdown.cPages = 0;
1867#endif
1868
1869 Assert(pCpu->idCpu != NIL_RTCPUID);
1870 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1871 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1872 pVCpu->hm.s.fForceTLBFlush = false;
1873 return;
1874}
1875
1876
1877/**
1878 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1879 *
1880 * @param pVM Pointer to the VM.
1881 * @param pVCpu Pointer to the VMCPU.
1882 * @param pCpu Pointer to the global HM CPU struct.
1883 * @remarks All references to "ASID" in this function pertain to "VPID" in
1884 * Intel's nomenclature. The reason is to avoid confusion in the compare
1885 * statements, since the host-CPU copies are named "ASID".
1886 *
1887 * @remarks Called with interrupts disabled.
1888 */
1889static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1890{
1891#ifdef VBOX_WITH_STATISTICS
1892 bool fTlbFlushed = false;
1893# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1894# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1895 if (!fTlbFlushed) \
1896 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1897 } while (0)
1898#else
1899# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1900# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1901#endif
1902
1903 AssertPtr(pVM);
1904 AssertPtr(pCpu);
1905 AssertPtr(pVCpu);
1906 Assert(pCpu->idCpu != NIL_RTCPUID);
1907
1908 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1909 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1910 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1911
1912 /*
1913 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1914 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1915 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1916 */
1917 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1918 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1919 {
1920 ++pCpu->uCurrentAsid;
1921 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1922 {
1923 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1924 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1925 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1926 }
1927
1928 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1929 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1930 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1931
1932 /*
1933 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1934 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1935 */
1936 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1937 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1938 HMVMX_SET_TAGGED_TLB_FLUSHED();
1939 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1940 }
1941
1942 /* Check for explicit TLB shootdowns. */
1943 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1944 {
1945 /*
1946 * Changes to the EPT paging structure by the VMM require flushing by EPT, as the CPU creates
1947 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1948 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1949 * but not guest-physical mappings.
1950 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1951 */
1952 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1953 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1954 HMVMX_SET_TAGGED_TLB_FLUSHED();
1955 }
1956
1957 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
1958 * where it is commented out. Support individual entry flushing
1959 * someday. */
1960#if 0
1961 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1962 {
1963 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1964
1965 /*
1966 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1967 * as supported by the CPU.
1968 */
1969 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1970 {
1971 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1972 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1973 }
1974 else
1975 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1976
1977 HMVMX_SET_TAGGED_TLB_FLUSHED();
1978 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1979 pVCpu->hm.s.TlbShootdown.cPages = 0;
1980 }
1981#endif
1982
1983 pVCpu->hm.s.fForceTLBFlush = false;
1984
1985 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1986
1987 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1988 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1989 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1990 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1991 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1992 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
1993 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
1994 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1995 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1996
1997 /* Update VMCS with the VPID. */
1998 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1999 AssertRC(rc);
2000
2001#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2002}
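
/*
 * Note on the VPID ("ASID") management above: VPID 0 is reserved for the host,
 * so guests cycle through 1..uMaxAsid-1 per host CPU. On wraparound the host
 * CPU's cTlbFlushes counter is bumped; the idLastCpu/cTlbFlushes check at the
 * top of this function picks that up on every VCPU, forcing each of them to
 * take a fresh VPID and flush before reusing it.
 */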
2003
2004
2005/**
2006 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2007 *
2009 * @param pVM Pointer to the VM.
2010 * @param pVCpu Pointer to the VMCPU.
2011 * @param pCpu Pointer to the global HM CPU struct.
2012 *
2013 * @remarks Called with interrupts disabled.
2014 */
2015static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2016{
2017 AssertPtr(pVM);
2018 AssertPtr(pVCpu);
2019 AssertPtr(pCpu);
2020 Assert(pCpu->idCpu != NIL_RTCPUID);
2021 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
2022 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
2023
2024 /*
2025 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2026 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2027 */
2028 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2029 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2030 {
2031 pVCpu->hm.s.fForceTLBFlush = true;
2032 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2033 }
2034
2035 /* Check for explicit TLB shootdown flushes. */
2036 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2037 {
2038 pVCpu->hm.s.fForceTLBFlush = true;
2039 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2040 }
2041
2042 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2043 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2044
2045 if (pVCpu->hm.s.fForceTLBFlush)
2046 {
2047 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2048 pVCpu->hm.s.fForceTLBFlush = false;
2049 }
2050 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2051 * where it is commented out. Support individual entry flushing
2052 * someday. */
2053#if 0
2054 else
2055 {
2056 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2057 {
2058 /* We cannot flush individual entries without VPID support. Flush using EPT. */
2059 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2060 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2061 }
2062 else
2063 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2064
2065 pVCpu->hm.s.TlbShootdown.cPages = 0;
2066 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2067 }
2068#endif
2069}
2070
2071
2072/**
2073 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2074 *
2076 * @param pVM Pointer to the VM.
2077 * @param pVCpu Pointer to the VMCPU.
2078 * @param pCpu Pointer to the global HM CPU struct.
2079 *
2080 * @remarks Called with interrupts disabled.
2081 */
2082static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2083{
2084 AssertPtr(pVM);
2085 AssertPtr(pVCpu);
2086 AssertPtr(pCpu);
2087 Assert(pCpu->idCpu != NIL_RTCPUID);
2088 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with VPID disabled."));
2089 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging enabled."));
2090
2091 /*
2092 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2093 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2094 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2095 */
2096 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2097 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2098 {
2099 pVCpu->hm.s.fForceTLBFlush = true;
2100 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2101 }
2102
2103 /* Check for explicit TLB shootdown flushes. */
2104 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2105 {
2106 /*
2107 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
2108 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
2109 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush's too) - an obscure corner case.
2110 */
2111 pVCpu->hm.s.fForceTLBFlush = true;
2112 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2113 }
2114
2115 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2116 if (pVCpu->hm.s.fForceTLBFlush)
2117 {
2118 ++pCpu->uCurrentAsid;
2119 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2120 {
2121 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2122 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2123 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2124 }
2125
2126 pVCpu->hm.s.fForceTLBFlush = false;
2127 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2128 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2129 if (pCpu->fFlushAsidBeforeUse)
2130 {
2131 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
2132 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2133 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
2134 {
2135 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2136 pCpu->fFlushAsidBeforeUse = false;
2137 }
2138 else
2139 {
2140 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2141 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2142 }
2143 }
2144 }
2145 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2146 * where it is commented out. Support individual entry flushing
2147 * someday. */
2148#if 0
2149 else
2150 {
2151 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2152 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
2153 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2154 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
2155
2156 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2157 {
2158 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
2159 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2160 {
2161 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2162 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2163 }
2164 else
2165 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2166
2167 pVCpu->hm.s.TlbShootdown.cPages = 0;
2168 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2169 }
2170 else
2171 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2172 }
2173#endif
2174
2175 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2176 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2177 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2178 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2179 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2180 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2181 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2182
2183 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2184 AssertRC(rc);
2185}
2186
2187
2188/**
2189 * Flushes the guest TLB entry based on CPU capabilities.
2190 *
2191 * @param pVCpu Pointer to the VMCPU.
2192 * @param pCpu Pointer to the global HM CPU struct.
2193 */
2194DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2195{
2196#ifdef HMVMX_ALWAYS_FLUSH_TLB
2197 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2198#endif
2199 PVM pVM = pVCpu->CTX_SUFF(pVM);
2200 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
2201 {
2202 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2203 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2204 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2205 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2206 default:
2207 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2208 break;
2209 }
2210
2211 /* VMCPU_FF_TLB_SHOOTDOWN is unused. */
2212 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN));
2213
2214 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2215}
2216
2217
2218/**
2219 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2220 * TLB entries from the host TLB before VM-entry.
2221 *
2222 * @returns VBox status code.
2223 * @param pVM Pointer to the VM.
2224 */
2225static int hmR0VmxSetupTaggedTlb(PVM pVM)
2226{
2227 /*
2228 * Determine optimal flush type for Nested Paging.
2229 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
2230 * guest execution (see hmR3InitFinalizeR0()).
2231 */
2232 if (pVM->hm.s.fNestedPaging)
2233 {
2234 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2235 {
2236 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2237 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
2238 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2239 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
2240 else
2241 {
2242 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
2243 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2244 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2245 }
2246
2247 /* Make sure the write-back cacheable memory type for EPT is supported. */
2248 if (!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
2249 {
2250 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.Msrs.u64EptVpidCaps));
2251 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2252 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2253 }
2254 }
2255 else
2256 {
2257 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2258 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2259 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2260 }
2261 }
2262
2263 /*
2264 * Determine optimal flush type for VPID.
2265 */
2266 if (pVM->hm.s.vmx.fVpid)
2267 {
2268 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2269 {
2270 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2271 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
2272 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2273 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
2274 else
2275 {
2276 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore the VPID capability. */
2277 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2278 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
2279 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2280 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2281 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2282 pVM->hm.s.vmx.fVpid = false;
2283 }
2284 }
2285 else
2286 {
2287 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2288 Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
2289 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2290 pVM->hm.s.vmx.fVpid = false;
2291 }
2292 }
2293
2294 /*
2295 * Setup the handler for flushing tagged-TLBs.
2296 */
2297 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2298 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
2299 else if (pVM->hm.s.fNestedPaging)
2300 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
2301 else if (pVM->hm.s.vmx.fVpid)
2302 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
2303 else
2304 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
2305 return VINF_SUCCESS;
2306}
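
/*
 * Resulting handler selection, as dispatched by hmR0VmxFlushTaggedTlb():
 *
 *   fNestedPaging   fVpid    uFlushTaggedTlb
 *   true            true     HMVMX_FLUSH_TAGGED_TLB_EPT_VPID
 *   true            false    HMVMX_FLUSH_TAGGED_TLB_EPT
 *   false           true     HMVMX_FLUSH_TAGGED_TLB_VPID
 *   false           false    HMVMX_FLUSH_TAGGED_TLB_NONE
 */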
2307
2308
2309/**
2310 * Sets up pin-based VM-execution controls in the VMCS.
2311 *
2312 * @returns VBox status code.
2313 * @param pVM Pointer to the VM.
2314 * @param pVCpu Pointer to the VMCPU.
2315 */
2316static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2317{
2318 AssertPtr(pVM);
2319 AssertPtr(pVCpu);
2320
2321 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2322 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2323
2324 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2325 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2326 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
2327
2328 /* Enable the VMX preemption timer. */
2329 if (pVM->hm.s.vmx.fUsePreemptTimer)
2330 {
2331 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2332 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2333 }
2334
2335 if ((val & zap) != val)
2336 {
2337 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX32 val=%#RX32 zap=%#RX32\n",
2338 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
2339 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2340 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2341 }
2342
2343 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
2344 AssertRCReturn(rc, rc);
2345
2346 /* Update VCPU with the currently set pin-based VM-execution controls. */
2347 pVCpu->hm.s.vmx.u32PinCtls = val;
2348 return rc;
2349}
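
/*
 * Note on the val/zap pattern used here and in the setup routines below:
 * 'val' starts as the allowed-0 bits ("disallowed0", i.e. bits the CPU forces
 * to 1) and the features we want are OR'ed in; 'zap' is the allowed-1 mask.
 * The check (val & zap) != val therefore fires exactly when a requested
 * control bit is not supported by the CPU. A minimal sketch with a
 * hypothetical control bit:
 *
 *   uint32_t val = disallowed0 | SOME_WANTED_CTRL;
 *   if ((val & zap) != val)   // SOME_WANTED_CTRL missing from allowed1.
 *       return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 */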
2350
2351
2352/**
2353 * Sets up processor-based VM-execution controls in the VMCS.
2354 *
2355 * @returns VBox status code.
2356 * @param pVM Pointer to the VM.
2357 * @param pVCpu Pointer to the VMCPU.
2358 */
2359static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2360{
2361 AssertPtr(pVM);
2362 AssertPtr(pVCpu);
2363
2364 int rc = VERR_INTERNAL_ERROR_5;
2365 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2366 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2367
2368 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2369 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2370 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2371 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2372 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2373 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2374 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2375
2376 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; verify that it isn't forced to be always set or always clear. */
2377 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2378 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2379 {
2380 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!\n"));
2381 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2382 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2383 }
2384
2385 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2386 if (!pVM->hm.s.fNestedPaging)
2387 {
2388 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2389 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2390 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2391 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2392 }
2393
2394 /* Use TPR shadowing if supported by the CPU. */
2395 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2396 {
2397 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2398 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2399 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2400 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2401 AssertRCReturn(rc, rc);
2402
2403 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2404 /* CR8 writes cause a VM-exit based on the TPR threshold. */
2405 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2406 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2407 }
2408 else
2409 {
2410 /*
2411 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2412 * Set this control only for 64-bit guests.
2413 */
2414 if (pVM->hm.s.fAllow64BitGuests)
2415 {
2416 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2417 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2418 }
2419 }
2420
2421 /* Use MSR-bitmaps if supported by the CPU. */
2422 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2423 {
2424 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2425
2426 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2427 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2428 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2429 AssertRCReturn(rc, rc);
2430
2431 /*
2432 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2433 * automatically as dedicated fields in the VMCS.
2434 */
2435 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2436 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2437 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2438 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2439 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2440
2441#if HC_ARCH_BITS == 64
2442 /*
2443 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2444 */
2445 if (pVM->hm.s.fAllow64BitGuests)
2446 {
2447 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2448 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2449 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2450 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2451 }
2452#endif
2453 }
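
    /*
     * Note: the MSRs given passthru treatment for 64-bit guests above (LSTAR,
     * STAR, SF_MASK, KERNEL_GS_BASE) are exactly the ones swapped by the lazy
     * MSR code (hmR0VmxLazyLoadGuestMsrs() and friends); not intercepting them
     * is what makes that non-trapping scheme workable.
     */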
2454
2455 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2456 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2457 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2458
2459 if ((val & zap) != val)
2460 {
2461 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX32 val=%#RX32 zap=%#RX32\n",
2462 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
2463 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2464 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2465 }
2466
2467 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
2468 AssertRCReturn(rc, rc);
2469
2470 /* Update VCPU with the currently set processor-based VM-execution controls. */
2471 pVCpu->hm.s.vmx.u32ProcCtls = val;
2472
2473 /*
2474 * Secondary processor-based VM-execution controls.
2475 */
2476 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2477 {
2478 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2479 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2480
2481 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2482 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
2483
2484 if (pVM->hm.s.fNestedPaging)
2485 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
2486 else
2487 {
2488 /*
2489 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
2490 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
2491 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
2492 */
2493 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2494 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2495 }
2496
2497 if (pVM->hm.s.vmx.fVpid)
2498 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
2499
2500 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2501 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
2502
2503 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
2504 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2505 * done dynamically. */
2506 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2507 {
2508 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2509 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2510 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2511 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2512 AssertRCReturn(rc, rc);
2513 }
2514
2515 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2516 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
2517
2518 if ((val & zap) != val)
2519 {
2520 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
2521 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
2522 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2523 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2524 }
2525
2526 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
2527 AssertRCReturn(rc, rc);
2528
2529 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
2530 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2531 }
2532 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2533 {
2534 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest enabled but secondary processor-based VM-execution controls are not "
2535 "available\n"));
2536 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2537 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2538 }
2539
2540 return VINF_SUCCESS;
2541}
2542
2543
2544/**
2545 * Sets up miscellaneous (everything other than Pin & Processor-based
2546 * VM-execution) control fields in the VMCS.
2547 *
2548 * @returns VBox status code.
2549 * @param pVM Pointer to the VM.
2550 * @param pVCpu Pointer to the VMCPU.
2551 */
2552static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2553{
2554 NOREF(pVM);
2555 AssertPtr(pVM);
2556 AssertPtr(pVCpu);
2557
2558 int rc = VERR_GENERAL_FAILURE;
2559
2560 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2561#if 0
2562 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
2563 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
2564 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
2565
2566 /*
2567 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
2568 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
2569 * We thus use the exception bitmap to control it rather than use both.
2570 */
2571 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
2572 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
2573
2574 /** @todo Explore possibility of using IO-bitmaps. */
2575 /* All IO & IOIO instructions cause VM-exits. */
2576 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
2577 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
2578
2579 /* Initialize the MSR-bitmap area. */
2580 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2581 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
2582 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2583#endif
2584
2585 /* Setup MSR auto-load/store area. */
2586 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2587 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2588 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2589 AssertRCReturn(rc, rc);
2590 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2591 AssertRCReturn(rc, rc);
2592
2593 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2594 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2595 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2596 AssertRCReturn(rc, rc);
2597
2598 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2599 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2600 AssertRCReturn(rc, rc);
2601
2602 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2603#if 0
2604 /* Setup debug controls */
2605 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
2606 AssertRCReturn(rc, rc);
2607 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2608 AssertRCReturn(rc, rc);
2609#endif
2610
2611 return rc;
2612}
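
/*
 * Note on the MSR areas set up above: the same physical address (HCPhysGuestMsr)
 * serves both as the VM-entry MSR-load area and the VM-exit MSR-store area,
 * so whatever the CPU stores there on VM-exit is reloaded on the next VM-entry
 * without extra copying. HCPhysHostMsr is only a VM-exit MSR-load area and
 * holds the host values to restore.
 */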
2613
2614
2615/**
2616 * Sets up the initial exception bitmap in the VMCS based on static conditions
2617 * (i.e. conditions that cannot ever change after starting the VM).
2618 *
2619 * @returns VBox status code.
2620 * @param pVM Pointer to the VM.
2621 * @param pVCpu Pointer to the VMCPU.
2622 */
2623static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2624{
2625 AssertPtr(pVM);
2626 AssertPtr(pVCpu);
2627
2628 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2629
2630 uint32_t u32XcptBitmap = 0;
2631
2632 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2633 if (!pVM->hm.s.fNestedPaging)
2634 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2635
2636 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2637 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2638 AssertRCReturn(rc, rc);
2639 return rc;
2640}
2641
2642
2643/**
2644 * Sets up the initial guest-state mask. The guest-state mask is consulted
2645 * before reading guest-state fields from the VMCS as VMREADs can be expensive
2646 * for the nested virtualization case (as it would cause a VM-exit).
2647 *
2648 * @param pVCpu Pointer to the VMCPU.
2649 */
2650static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2651{
2652 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2653 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2654 return VINF_SUCCESS;
2655}
2656
2657
2658/**
2659 * Does per-VM VT-x initialization.
2660 *
2661 * @returns VBox status code.
2662 * @param pVM Pointer to the VM.
2663 */
2664VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2665{
2666 LogFlowFunc(("pVM=%p\n", pVM));
2667
2668 int rc = hmR0VmxStructsAlloc(pVM);
2669 if (RT_FAILURE(rc))
2670 {
2671 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2672 return rc;
2673 }
2674
2675 return VINF_SUCCESS;
2676}
2677
2678
2679/**
2680 * Does per-VM VT-x termination.
2681 *
2682 * @returns VBox status code.
2683 * @param pVM Pointer to the VM.
2684 */
2685VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2686{
2687 LogFlowFunc(("pVM=%p\n", pVM));
2688
2689#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2690 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2691 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2692#endif
2693 hmR0VmxStructsFree(pVM);
2694 return VINF_SUCCESS;
2695}
2696
2697
2698/**
2699 * Sets up the VM for execution under VT-x.
2700 * This function is only called once per-VM during initialization.
2701 *
2702 * @returns VBox status code.
2703 * @param pVM Pointer to the VM.
2704 */
2705VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2706{
2707 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2708 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2709
2710 LogFlowFunc(("pVM=%p\n", pVM));
2711
2712 /*
2713 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2714 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
2715 */
2716 /* -XXX- change hmR3InitFinalizeR0Intel() to fail if pRealModeTSS alloc fails. */
2717 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2718 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2719 || !pVM->hm.s.vmx.pRealModeTSS))
2720 {
2721 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2722 return VERR_INTERNAL_ERROR;
2723 }
2724
2725#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2726 /*
2727 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2728 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2729 */
2730 if ( pVM->hm.s.fAllow64BitGuests
2731 && !HMVMX_IS_64BIT_HOST_MODE())
2732 {
2733 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2734 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2735 }
2736#endif
2737
2738 /* Initialize these always, see hmR3InitFinalizeR0(). */
2739 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NONE;
2740 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
2741
2742 /* Setup the tagged-TLB flush handlers. */
2743 int rc = hmR0VmxSetupTaggedTlb(pVM);
2744 if (RT_FAILURE(rc))
2745 {
2746 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2747 return rc;
2748 }
2749
2750 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2751 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2752#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2753 if ( HMVMX_IS_64BIT_HOST_MODE()
2754 && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2755 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2756 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2757 {
2758 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2759 }
2760#endif
2761
2762 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2763 {
2764 PVMCPU pVCpu = &pVM->aCpus[i];
2765 AssertPtr(pVCpu);
2766 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2767
2768 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2769 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2770
2771 /* Set revision dword at the beginning of the VMCS structure. */
2772 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2773
2774 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2775 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2776 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2777 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2778
2779 /* Load this VMCS as the current VMCS. */
2780 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2781 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2782 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2783
2784 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2785 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2786 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2787
2788 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2789 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2790 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2791
2792 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2793 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2794 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2795
2796 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2797 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2798 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2799
2800 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2801 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2802 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2803
2804#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2805 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2806 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2807 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2808#endif
2809
2810 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2811 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2812 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2813 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2814
2815 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2816
2817 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2818 }
2819
2820 return VINF_SUCCESS;
2821}
2822
2823
2824/**
2825 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2826 * the VMCS.
2827 *
2828 * @returns VBox status code.
2829 * @param pVM Pointer to the VM.
2830 * @param pVCpu Pointer to the VMCPU.
2831 */
2832DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2833{
2834 NOREF(pVM); NOREF(pVCpu);
2835
2836 RTCCUINTREG uReg = ASMGetCR0();
2837 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2838 AssertRCReturn(rc, rc);
2839
2840#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2841 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2842 if (HMVMX_IS_64BIT_HOST_MODE())
2843 {
2844 uint64_t uRegCR3 = HMR0Get64bitCR3();
2845 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2846 }
2847 else
2848#endif
2849 {
2850 uReg = ASMGetCR3();
2851 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2852 }
2853 AssertRCReturn(rc, rc);
2854
2855 uReg = ASMGetCR4();
2856 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2857 AssertRCReturn(rc, rc);
2858 return rc;
2859}
2860
2861
2862#if HC_ARCH_BITS == 64
2863/**
2864 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2865 * requirements. See hmR0VmxSaveHostSegmentRegs().
2866 */
2867# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2868 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2869 { \
2870 bool fValidSelector = true; \
2871 if ((selValue) & X86_SEL_LDT) \
2872 { \
2873 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2874 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2875 } \
2876 if (fValidSelector) \
2877 { \
2878 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2879 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2880 } \
2881 (selValue) = 0; \
2882 }
2883#endif
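
/*
 * Rationale for the adjustment above: VM-entry requires the host selector
 * fields to have RPL == 0 and TI == 0 (Intel spec. 26.2.3 "Checks on Host
 * Segment and Descriptor-Table Registers"). Selectors violating this are
 * stashed in RestoreHost.uHostSelDS/ES/FS/GS (when still valid), written to
 * the VMCS as 0, and restored manually via the VMX_RESTORE_HOST_SEL_* flags
 * once we leave VT-x.
 */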
2884
2885
2886/**
2887 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2888 * the host-state area in the VMCS.
2889 *
2890 * @returns VBox status code.
2891 * @param pVM Pointer to the VM.
2892 * @param pVCpu Pointer to the VMCPU.
2893 */
2894DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2895{
2896 NOREF(pVM);
2897 int rc = VERR_INTERNAL_ERROR_5;
2898
2899#if HC_ARCH_BITS == 64
2900 /*
2901 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2902 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
2903 */
2904 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2905 ("Re-saving host-state after executing guest code without leaving VT-x!\n"), VERR_WRONG_ORDER);
2906#endif
2907
2908 /*
2909 * Host DS, ES, FS and GS segment registers.
2910 */
2911#if HC_ARCH_BITS == 64
2912 RTSEL uSelDS = ASMGetDS();
2913 RTSEL uSelES = ASMGetES();
2914 RTSEL uSelFS = ASMGetFS();
2915 RTSEL uSelGS = ASMGetGS();
2916#else
2917 RTSEL uSelDS = 0;
2918 RTSEL uSelES = 0;
2919 RTSEL uSelFS = 0;
2920 RTSEL uSelGS = 0;
2921#endif
2922
2923 /* Recalculate which host-state bits need to be manually restored. */
2924 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2925
2926 /*
2927 * Host CS and SS segment registers.
2928 */
2929#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2930 RTSEL uSelCS;
2931 RTSEL uSelSS;
2932 if (HMVMX_IS_64BIT_HOST_MODE())
2933 {
2934 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2935 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2936 }
2937 else
2938 {
2939 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2940 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2941 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2942 }
2943#else
2944 RTSEL uSelCS = ASMGetCS();
2945 RTSEL uSelSS = ASMGetSS();
2946#endif
2947
2948 /*
2949 * Host TR segment register.
2950 */
2951 RTSEL uSelTR = ASMGetTR();
2952
2953#if HC_ARCH_BITS == 64
2954 /*
2955 * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to gain VM-entry and restore them
2956 * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2957 */
2958 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2959 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2960 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2961 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2962# undef VMXLOCAL_ADJUST_HOST_SEG
2963#endif
2964
2965 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2966 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2967 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2968 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2969 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2970 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2971 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2972 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2973 Assert(uSelCS);
2974 Assert(uSelTR);
2975
2976 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2977#if 0
2978 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2979 Assert(uSelSS != 0);
2980#endif
2981
2982 /* Write these host selector fields into the host-state area in the VMCS. */
2983 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2984 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2985#if HC_ARCH_BITS == 64
2986 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2987 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2988 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2989 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2990#endif
2991 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2992
2993 /*
2994 * Host GDTR and IDTR.
2995 */
2996 RTGDTR Gdtr;
2997 RT_ZERO(Gdtr);
2998#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2999 if (HMVMX_IS_64BIT_HOST_MODE())
3000 {
3001 X86XDTR64 Gdtr64;
3002 X86XDTR64 Idtr64;
3003 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
3004 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
3005 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
3006
3007 Gdtr.cbGdt = Gdtr64.cb;
3008 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
3009 }
3010 else
3011#endif
3012 {
3013 RTIDTR Idtr;
3014 ASMGetGDTR(&Gdtr);
3015 ASMGetIDTR(&Idtr);
3016 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
3017 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
3018
3019#if HC_ARCH_BITS == 64
3020 /*
3021     * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
3022 * maximum limit (0xffff) on every VM-exit.
3023 */
3024 if (Gdtr.cbGdt != 0xffff)
3025 {
3026 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3027 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3028 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3029 }
3030
3031 /*
3032 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
3033 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit as 0xfff, VT-x
3034     * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insist
3035     * on 0xfff being the limit (Windows Patch Guard) or use the limit for other purposes (darwin puts the CPU ID in there
3036 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on
3037 * hosts where we are pretty sure it won't cause trouble.
3038 */
3039# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3040 if (Idtr.cbIdt < 0x0fff)
3041# else
3042 if (Idtr.cbIdt != 0xffff)
3043# endif
3044 {
3045 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3046 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3047 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3048 }
3049#endif
3050 }
3051
3052 /*
3053 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
3054 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
3055 */
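    /* Note: ORing in the RPL and TI bits (7) checks that the last byte of the 8-byte TSS descriptor also lies within the GDT limit. */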
3056 if ((uSelTR | X86_SEL_RPL_LDT) > Gdtr.cbGdt)
3057 {
3058 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
3059 return VERR_VMX_INVALID_HOST_STATE;
3060 }
3061
3062 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
3063#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3064 if (HMVMX_IS_64BIT_HOST_MODE())
3065 {
3066 /* We need the 64-bit TR base for hybrid darwin. */
3067 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
3068 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
3069 }
3070 else
3071#endif
3072 {
3073 uintptr_t uTRBase;
3074#if HC_ARCH_BITS == 64
3075 uTRBase = X86DESC64_BASE(pDesc);
3076
3077 /*
3078 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
3079 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
3080 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
3081 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3082 *
3083 * [1] See Intel spec. 3.5 "System Descriptor Types".
3084 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3085 */
3086 Assert(pDesc->System.u4Type == 11);
3087 if ( pDesc->System.u16LimitLow != 0x67
3088 || pDesc->System.u4LimitHigh)
3089 {
3090 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3091 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3092
3093 /* Store the GDTR here as we need it while restoring TR. */
3094 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3095 }
3096#else
3097 uTRBase = X86DESC_BASE(pDesc);
3098#endif
3099 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3100 }
3101 AssertRCReturn(rc, rc);
3102
3103 /*
3104 * Host FS base and GS base.
3105 */
3106#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3107 if (HMVMX_IS_64BIT_HOST_MODE())
3108 {
3109 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3110 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3111 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
3112 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
3113
3114# if HC_ARCH_BITS == 64
3115 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3116 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3117 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3118 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3119 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3120# endif
3121 }
3122#endif
3123 return rc;
3124}
3125
3126
3127/**
3128 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
3129 * host-state area of the VMCS. These MSRs will be automatically restored on
3130 * the host after every successful VM-exit.
3131 *
3132 * @returns VBox status code.
3133 * @param pVM Pointer to the VM.
3134 * @param pVCpu Pointer to the VMCPU.
3135 *
3136 * @remarks No-long-jump zone!!!
3137 */
3138DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3139{
3140 NOREF(pVM);
3141
3142 AssertPtr(pVCpu);
3143 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3144
3145 int rc = VINF_SUCCESS;
3146#if HC_ARCH_BITS == 64
3147 if (pVM->hm.s.fAllow64BitGuests)
3148 hmR0VmxLazySaveHostMsrs(pVCpu);
3149#endif
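    /* The lazily swapped MSRs are restored on return to ring-3 (or on preemption) rather than on every VM-exit,
       which keeps the common VM-exit path cheap when running 64-bit guests. */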
3150
3151 /*
3152 * Host Sysenter MSRs.
3153 */
3154 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3155 AssertRCReturn(rc, rc);
3156#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3157 if (HMVMX_IS_64BIT_HOST_MODE())
3158 {
3159 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3160 AssertRCReturn(rc, rc);
3161 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3162 }
3163 else
3164 {
3165 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3166 AssertRCReturn(rc, rc);
3167 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3168 }
3169#elif HC_ARCH_BITS == 32
3170 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3171 AssertRCReturn(rc, rc);
3172 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3173#else
3174 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3175 AssertRCReturn(rc, rc);
3176 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3177#endif
3178 AssertRCReturn(rc, rc);
3179
3180 /*
3181 * Host EFER MSR.
3182 * If the CPU supports the newer VMCS controls for managing EFER, use it.
3183 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
3184 */
3185 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3186 {
3187 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3188 AssertRCReturn(rc, rc);
3189 }
3190
3191 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
3192 * hmR0VmxLoadGuestExitCtls() !! */
3193
3194 return rc;
3195}
3196
3197
3198/**
3199 * Figures out if we need to swap the EFER MSR which is
3200 * particularly expensive.
3201 *
3202 * We check all relevant bits. For now, that's everything
3203 * besides LMA/LME, as these two bits are handled by VM-entry,
3204 * see hmR0VmxLoadGuestExitCtls() and
3205 * hmR0VmxLoadGuestEntryCtls().
3206 *
3207 * @returns true if we need to load guest EFER, false otherwise.
3208 * @param pVCpu Pointer to the VMCPU.
3209 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3210 * out-of-sync. Make sure to update the required fields
3211 * before using them.
3212 *
3213 * @remarks Requires EFER, CR4.
3214 * @remarks No-long-jump zone!!!
3215 */
3216static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3217{
3218#ifdef HMVMX_ALWAYS_SWAP_EFER
3219 return true;
3220#endif
3221
3222#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3223 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3224 if (CPUMIsGuestInLongMode(pVCpu))
3225 return false;
3226#endif
3227
3228 PVM pVM = pVCpu->CTX_SUFF(pVM);
3229 uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3230 uint64_t u64GuestEfer = pMixedCtx->msrEFER;
3231
3232 /*
3233 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
3234 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
3235 */
3236 if ( CPUMIsGuestInLongMode(pVCpu)
3237 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3238 {
3239 return true;
3240 }
3241
3242 /*
3243     * If the guest uses PAE and the EFER.NXE bit differs, we need to swap EFER as it
3244 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3245 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3246 */
3247 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3248 && (pMixedCtx->cr0 & X86_CR0_PG)
3249 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3250 {
3251 /* Assert that host is PAE capable. */
3252 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3253 return true;
3254 }
3255
3256 /** @todo Check the latest Intel spec. for any other bits,
3257 * like SMEP/SMAP? */
3258 return false;
3259}
3260
3261
3262/**
3263 * Sets up VM-entry controls in the VMCS. These controls can affect things done
3264 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
3265 * controls".
3266 *
3267 * @returns VBox status code.
3268 * @param pVCpu Pointer to the VMCPU.
3269 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3270 * out-of-sync. Make sure to update the required fields
3271 * before using them.
3272 *
3273 * @remarks Requires EFER.
3274 * @remarks No-long-jump zone!!!
3275 */
3276DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3277{
3278 int rc = VINF_SUCCESS;
3279 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3280 {
3281 PVM pVM = pVCpu->CTX_SUFF(pVM);
3282 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3283 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3284
3285        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3286 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3287
3288 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3289 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3290 {
3291 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3292 Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
3293 }
3294 else
3295 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3296
3297 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
3298 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3299 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3300 {
3301 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3302 Log4(("Load: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n"));
3303 }
3304
3305 /*
3306 * The following should -not- be set (since we're not in SMM mode):
3307 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3308 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3309 */
3310
3311 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3312 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3313
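        /* Sanity check: 'zap' is the allowed-1 mask; any bit in 'val' that is not also set in 'zap' cannot be enabled on this CPU. */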
3314 if ((val & zap) != val)
3315 {
3316 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3317 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
3318 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3319 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3320 }
3321
3322 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
3323 AssertRCReturn(rc, rc);
3324
3325        /* Update VCPU with the currently set VM-entry controls. */
3326 pVCpu->hm.s.vmx.u32EntryCtls = val;
3327 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3328 }
3329 return rc;
3330}
3331
3332
3333/**
3334 * Sets up the VM-exit controls in the VMCS.
3335 *
3336 * @returns VBox status code.
3338 * @param pVCpu Pointer to the VMCPU.
3339 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3340 * out-of-sync. Make sure to update the required fields
3341 * before using them.
3342 *
3343 * @remarks Requires EFER.
3344 */
3345DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3346{
3347 NOREF(pMixedCtx);
3348
3349 int rc = VINF_SUCCESS;
3350 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3351 {
3352 PVM pVM = pVCpu->CTX_SUFF(pVM);
3353 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3354 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3355
3356 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3357 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3358
3359 /*
3360 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3361 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
3362 */
3363#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3364 if (HMVMX_IS_64BIT_HOST_MODE())
3365 {
3366 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3367 Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
3368 }
3369 else
3370 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3371#else
3372 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3373 {
3374 /* The switcher returns to long mode, EFER is managed by the switcher. */
3375 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3376 Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
3377 }
3378 else
3379 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3380#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
3381
3382 /* If the newer VMCS fields for managing EFER exists, use it. */
3383 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3384 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3385 {
3386 val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3387 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3388 Log4(("Load: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n"));
3389 }
3390
3391 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3392 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3393
3394 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3395 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3396 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3397
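        /* If the CPU can save the VMX-preemption timer value on VM-exit, enable it so the remaining count is available after the exit. */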
3398 if (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
3399 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3400
3401 if ((val & zap) != val)
3402 {
3403            LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3404 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
3405 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3406 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3407 }
3408
3409 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
3410 AssertRCReturn(rc, rc);
3411
3412 /* Update VCPU with the currently set VM-exit controls. */
3413 pVCpu->hm.s.vmx.u32ExitCtls = val;
3414 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3415 }
3416 return rc;
3417}
3418
3419
3420/**
3421 * Loads the guest APIC and related state.
3422 *
3423 * @returns VBox status code.
3425 * @param pVCpu Pointer to the VMCPU.
3426 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3427 * out-of-sync. Make sure to update the required fields
3428 * before using them.
3429 */
3430DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3431{
3432 NOREF(pMixedCtx);
3433
3434 int rc = VINF_SUCCESS;
3435 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3436 {
3437 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
3438 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3439 {
3440 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3441
3442 bool fPendingIntr = false;
3443 uint8_t u8Tpr = 0;
3444 uint8_t u8PendingIntr = 0;
3445 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3446 AssertRCReturn(rc, rc);
3447
3448 /*
3449 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
3450 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
3451 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
3452 * the interrupt when we VM-exit for other reasons.
3453 */
3454 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3455 uint32_t u32TprThreshold = 0;
3456 if (fPendingIntr)
3457 {
3458 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3459 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
3460 const uint8_t u8TprPriority = (u8Tpr >> 4) & 0xf;
3461 if (u8PendingPriority <= u8TprPriority)
3462 u32TprThreshold = u8PendingPriority;
3463 else
3464 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
3465 }
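            /* Example: u8Tpr=0x50 (class 5) with a pending interrupt 0x61 (class 6): 6 > 5, so the threshold becomes the TPR class (5). */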
3466 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3467
3468 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3469 AssertRCReturn(rc, rc);
3470 }
3471
3472 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3473 }
3474 return rc;
3475}
3476
3477
3478/**
3479 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3480 *
3481 * @returns Guest's interruptibility-state.
3482 * @param pVCpu Pointer to the VMCPU.
3483 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3484 * out-of-sync. Make sure to update the required fields
3485 * before using them.
3486 *
3487 * @remarks No-long-jump zone!!!
3488 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
3489 */
3490DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3491{
3492 /*
3493 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
3494 * inhibit interrupts or clear any existing interrupt-inhibition.
3495 */
3496 uint32_t uIntrState = 0;
3497 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3498 {
3499 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
3500 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3501 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3502 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
3503 {
3504 /*
3505 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
3506 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct.
3507 */
3508 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3509 }
3510 else if (pMixedCtx->eflags.Bits.u1IF)
3511 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3512 else
3513 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3514 }
3515 return uIntrState;
3516}
3517
3518
3519/**
3520 * Loads the guest's interruptibility-state into the guest-state area in the
3521 * VMCS.
3522 *
3523 * @returns VBox status code.
3524 * @param pVCpu Pointer to the VMCPU.
3525 * @param uIntrState The interruptibility-state to set.
3526 */
3527static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3528{
3529 NOREF(pVCpu);
3530 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
3531 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3532 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
3533 AssertRCReturn(rc, rc);
3534 return rc;
3535}
3536
3537
3538/**
3539 * Loads the guest's RIP into the guest-state area in the VMCS.
3540 *
3541 * @returns VBox status code.
3542 * @param pVCpu Pointer to the VMCPU.
3543 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3544 * out-of-sync. Make sure to update the required fields
3545 * before using them.
3546 *
3547 * @remarks No-long-jump zone!!!
3548 */
3549static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3550{
3551 int rc = VINF_SUCCESS;
3552 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3553 {
3554 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3555 AssertRCReturn(rc, rc);
3556
3557 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3558 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pMixedCtx->rip, HMCPU_CF_VALUE(pVCpu)));
3559 }
3560 return rc;
3561}
3562
3563
3564/**
3565 * Loads the guest's RSP into the guest-state area in the VMCS.
3566 *
3567 * @returns VBox status code.
3568 * @param pVCpu Pointer to the VMCPU.
3569 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3570 * out-of-sync. Make sure to update the required fields
3571 * before using them.
3572 *
3573 * @remarks No-long-jump zone!!!
3574 */
3575static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3576{
3577 int rc = VINF_SUCCESS;
3578 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3579 {
3580 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3581 AssertRCReturn(rc, rc);
3582
3583 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3584 Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
3585 }
3586 return rc;
3587}
3588
3589
3590/**
3591 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
3592 *
3593 * @returns VBox status code.
3594 * @param pVCpu Pointer to the VMCPU.
3595 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3596 * out-of-sync. Make sure to update the required fields
3597 * before using them.
3598 *
3599 * @remarks No-long-jump zone!!!
3600 */
3601static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3602{
3603 int rc = VINF_SUCCESS;
3604 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3605 {
3606 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3607 Let us assert it as such and use 32-bit VMWRITE. */
3608 Assert(!(pMixedCtx->rflags.u64 >> 32));
3609 X86EFLAGS Eflags = pMixedCtx->eflags;
3610 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
3611 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
3612
3613 /*
3614 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
3615 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
3616 */
3617 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3618 {
3619 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3620 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3621 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3622 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3623 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3624 }
3625
3626 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
3627 AssertRCReturn(rc, rc);
3628
3629 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3630 Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", Eflags.u32));
3631 }
3632 return rc;
3633}
3634
3635
3636/**
3637 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
3638 *
3639 * @returns VBox status code.
3640 * @param pVCpu Pointer to the VMCPU.
3641 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3642 * out-of-sync. Make sure to update the required fields
3643 * before using them.
3644 *
3645 * @remarks No-long-jump zone!!!
3646 */
3647DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3648{
3649 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3650 AssertRCReturn(rc, rc);
3651 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3652 AssertRCReturn(rc, rc);
3653 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3654 AssertRCReturn(rc, rc);
3655 return rc;
3656}
3657
3658
3659/**
3660 * Loads the guest CR0 control register into the guest-state area in the VMCS.
3661 * CR0 is partially shared with the host and we have to consider the FPU bits.
3662 *
3663 * @returns VBox status code.
3665 * @param pVCpu Pointer to the VMCPU.
3666 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3667 * out-of-sync. Make sure to update the required fields
3668 * before using them.
3669 *
3670 * @remarks No-long-jump zone!!!
3671 */
3672static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3673{
3674 /*
3675 * Guest CR0.
3676 * Guest FPU.
3677 */
3678 int rc = VINF_SUCCESS;
3679 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3680 {
3681 Assert(!(pMixedCtx->cr0 >> 32));
3682 uint32_t u32GuestCR0 = pMixedCtx->cr0;
3683 PVM pVM = pVCpu->CTX_SUFF(pVM);
3684
3685 /* The guest's view (read access) of its CR0 is unblemished. */
3686 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
3687 AssertRCReturn(rc, rc);
3688 Log4(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
3689
3690 /* Setup VT-x's view of the guest CR0. */
3691 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
3692 if (pVM->hm.s.fNestedPaging)
3693 {
3694 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3695 {
3696 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3697 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3698 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3699 }
3700 else
3701 {
3702 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3703 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3704 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3705 }
3706
3707 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3708 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3709 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3710
3711 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3712 AssertRCReturn(rc, rc);
3713 }
3714 else
3715 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3716
3717 /*
3718 * Guest FPU bits.
3719 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
3720         * CPUs to support VT-x, and makes no mention of UX with regard to the VM-entry checks.
3721 */
3722 u32GuestCR0 |= X86_CR0_NE;
3723 bool fInterceptNM = false;
3724 if (CPUMIsGuestFPUStateActive(pVCpu))
3725 {
3726 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3727 /* The guest should still get #NM exceptions when it expects it to, so we should not clear TS & MP bits here.
3728 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3729 }
3730 else
3731 {
3732 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3733 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3734 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3735 }
3736
3737 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3738 bool fInterceptMF = false;
3739 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3740 fInterceptMF = true;
3741
3742 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3743 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3744 {
3745 Assert(PDMVmmDevHeapIsEnabled(pVM));
3746 Assert(pVM->hm.s.vmx.pRealModeTSS);
3747 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3748 fInterceptNM = true;
3749 fInterceptMF = true;
3750 }
3751 else
3752 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3753
3754 if (fInterceptNM)
3755 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3756 else
3757 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3758
3759 if (fInterceptMF)
3760 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3761 else
3762 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3763
3764 /* Additional intercepts for debugging, define these yourself explicitly. */
3765#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3766 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3767 | RT_BIT(X86_XCPT_BP)
3768 | RT_BIT(X86_XCPT_DB)
3769 | RT_BIT(X86_XCPT_DE)
3770 | RT_BIT(X86_XCPT_NM)
3771 | RT_BIT(X86_XCPT_TS)
3772 | RT_BIT(X86_XCPT_UD)
3773 | RT_BIT(X86_XCPT_NP)
3774 | RT_BIT(X86_XCPT_SS)
3775 | RT_BIT(X86_XCPT_GP)
3776 | RT_BIT(X86_XCPT_PF)
3777 | RT_BIT(X86_XCPT_MF)
3778 ;
3779#elif defined(HMVMX_ALWAYS_TRAP_PF)
3780 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3781#endif
3782
3783 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3784
3785 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3786 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3787 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
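        /* VMX fixed-bit semantics: a bit that is 1 in CR0_FIXED0 must be 1 in CR0, a bit that is 0 in CR0_FIXED1 must be 0;
           hence the AND yields the must-be-one set and the OR yields the allowed-one mask. */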
3788 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3789 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3790 else
3791 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3792
3793 u32GuestCR0 |= uSetCR0;
3794 u32GuestCR0 &= uZapCR0;
3795 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3796
3797 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
3798 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3799 AssertRCReturn(rc, rc);
3800 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3801 AssertRCReturn(rc, rc);
3802 Log4(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
3803
3804 /*
3805 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3806 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3807 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3808 */
3809 uint32_t u32CR0Mask = 0;
3810 u32CR0Mask = X86_CR0_PE
3811 | X86_CR0_NE
3812 | X86_CR0_WP
3813 | X86_CR0_PG
3814 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3815 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3816 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3817
3818 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3819 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3820 * and @bugref{6944}. */
3821#if 0
3822 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3823 u32CR0Mask &= ~X86_CR0_PE;
3824#endif
3825 if (pVM->hm.s.fNestedPaging)
3826 u32CR0Mask &= ~X86_CR0_WP;
3827
3828 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3829 if (fInterceptNM)
3830 {
3831 u32CR0Mask |= X86_CR0_TS
3832 | X86_CR0_MP;
3833 }
3834
3835 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3836 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3837 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3838 AssertRCReturn(rc, rc);
3839 Log4(("Load: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", u32CR0Mask));
3840
3841 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3842 }
3843 return rc;
3844}
3845
3846
3847/**
3848 * Loads the guest control registers (CR3, CR4) into the guest-state area
3849 * in the VMCS.
3850 *
3851 * @returns VBox status code.
3853 * @param pVCpu Pointer to the VMCPU.
3854 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3855 * out-of-sync. Make sure to update the required fields
3856 * before using them.
3857 *
3858 * @remarks No-long-jump zone!!!
3859 */
3860static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3861{
3862 int rc = VINF_SUCCESS;
3863 PVM pVM = pVCpu->CTX_SUFF(pVM);
3864
3865 /*
3866 * Guest CR2.
3867 * It's always loaded in the assembler code. Nothing to do here.
3868 */
3869
3870 /*
3871 * Guest CR3.
3872 */
3873 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3874 {
3875 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3876 if (pVM->hm.s.fNestedPaging)
3877 {
3878 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3879
3880 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3881 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3882 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3883 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3884
3885 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3886 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3887 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
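            /* EPTP layout: bits 2:0 = EPT memory type (6 = write-back), bits 5:3 = page-walk length minus 1 (3 for a 4-level walk). */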
3888
3889 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3890 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3891 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3892 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3893
3894 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3895 AssertRCReturn(rc, rc);
3896 Log4(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3897
3898 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3899 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3900 {
3901 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3902 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3903 {
3904 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3905 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3906 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3907 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3908 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3909 }
3910
3911 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3912 have Unrestricted Execution to handle the guest when it's not using paging. */
3913 GCPhysGuestCR3 = pMixedCtx->cr3;
3914 }
3915 else
3916 {
3917 /*
3918 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3919 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3920 * EPT takes care of translating it to host-physical addresses.
3921 */
3922 RTGCPHYS GCPhys;
3923 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3924 Assert(PDMVmmDevHeapIsEnabled(pVM));
3925
3926 /* We obtain it here every time as the guest could have relocated this PCI region. */
3927 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3928 AssertRCReturn(rc, rc);
3929
3930 GCPhysGuestCR3 = GCPhys;
3931 }
3932
3933 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
3934 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3935 }
3936 else
3937 {
3938 /* Non-nested paging case, just use the hypervisor's CR3. */
3939 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3940
3941 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
3942 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3943 }
3944 AssertRCReturn(rc, rc);
3945
3946 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
3947 }
3948
3949 /*
3950 * Guest CR4.
3951 */
3952 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
3953 {
3954 Assert(!(pMixedCtx->cr4 >> 32));
3955 uint32_t u32GuestCR4 = pMixedCtx->cr4;
3956
3957 /* The guest's view of its CR4 is unblemished. */
3958 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3959 AssertRCReturn(rc, rc);
3960 Log4(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
3961
3962 /* Setup VT-x's view of the guest CR4. */
3963 /*
3964 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3965 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3966 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3967 */
3968 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3969 {
3970 Assert(pVM->hm.s.vmx.pRealModeTSS);
3971 Assert(PDMVmmDevHeapIsEnabled(pVM));
3972 u32GuestCR4 &= ~X86_CR4_VME;
3973 }
3974
3975 if (pVM->hm.s.fNestedPaging)
3976 {
3977 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
3978 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3979 {
3980 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3981 u32GuestCR4 |= X86_CR4_PSE;
3982 /* Our identity mapping is a 32-bit page directory. */
3983 u32GuestCR4 &= ~X86_CR4_PAE;
3984 }
3985 /* else use guest CR4.*/
3986 }
3987 else
3988 {
3989 /*
3990             * The shadow paging mode may differ from the guest paging mode; the shadow follows the host
3991             * paging mode, so we need to adjust VT-x's view of CR4 according to our shadow page tables.
3992 */
3993 switch (pVCpu->hm.s.enmShadowMode)
3994 {
3995 case PGMMODE_REAL: /* Real-mode. */
3996 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3997 case PGMMODE_32_BIT: /* 32-bit paging. */
3998 {
3999 u32GuestCR4 &= ~X86_CR4_PAE;
4000 break;
4001 }
4002
4003 case PGMMODE_PAE: /* PAE paging. */
4004 case PGMMODE_PAE_NX: /* PAE paging with NX. */
4005 {
4006 u32GuestCR4 |= X86_CR4_PAE;
4007 break;
4008 }
4009
4010 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4011 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4012#ifdef VBOX_ENABLE_64_BITS_GUESTS
4013 break;
4014#endif
4015 default:
4016 AssertFailed();
4017 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4018 }
4019 }
4020
4021 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4022 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4023 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4024 u32GuestCR4 |= uSetCR4;
4025 u32GuestCR4 &= uZapCR4;
4026
4027 /* Write VT-x's view of the guest CR4 into the VMCS. */
4028 Log4(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
4029 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
4030 AssertRCReturn(rc, rc);
4031
4032 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
4033 uint32_t u32CR4Mask = 0;
4034 u32CR4Mask = X86_CR4_VME
4035 | X86_CR4_PAE
4036 | X86_CR4_PGE
4037 | X86_CR4_PSE
4038 | X86_CR4_VMXE;
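        /* CR4.VMXE must remain set in the real CR4 while we are in VMX operation, so it is host-owned: guest reads see the
           read shadow and guest writes cause a VM-exit. */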
4039 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
4040 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
4041 AssertRCReturn(rc, rc);
4042
4043 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
4044 }
4045 return rc;
4046}
4047
4048
4049/**
4050 * Loads the guest debug registers into the guest-state area in the VMCS.
4051 * This also sets up whether #DB and MOV DRx accesses cause VM-exits.
4052 *
4053 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4054 *
4055 * @returns VBox status code.
4056 * @param pVCpu Pointer to the VMCPU.
4057 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4058 * out-of-sync. Make sure to update the required fields
4059 * before using them.
4060 *
4061 * @remarks No-long-jump zone!!!
4062 */
4063static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4064{
4065 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4066 return VINF_SUCCESS;
4067
4068#ifdef VBOX_STRICT
4069 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4070 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4071 {
4072 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4073 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4074 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4075 }
4076#endif
4077
4078 int rc;
4079 PVM pVM = pVCpu->CTX_SUFF(pVM);
4080 bool fInterceptDB = false;
4081 bool fInterceptMovDRx = false;
4082 if ( pVCpu->hm.s.fSingleInstruction
4083 || DBGFIsStepping(pVCpu))
4084 {
4085 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4086 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4087 {
4088 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4089 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4090 AssertRCReturn(rc, rc);
4091 Assert(fInterceptDB == false);
4092 }
4093 else
4094 {
4095 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4096 pVCpu->hm.s.fClearTrapFlag = true;
4097 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4098 fInterceptDB = true;
4099 }
4100 }
4101
4102 if ( fInterceptDB
4103 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4104 {
4105 /*
4106 * Use the combined guest and host DRx values found in the hypervisor
4107 * register set because the debugger has breakpoints active or someone
4108 * is single stepping on the host side without a monitor trap flag.
4109 *
4110 * Note! DBGF expects a clean DR6 state before executing guest code.
4111 */
4112#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4113 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4114 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4115 {
4116 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4117 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4118 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4119 }
4120 else
4121#endif
4122 if (!CPUMIsHyperDebugStateActive(pVCpu))
4123 {
4124 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4125 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4126 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4127 }
4128
4129 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
4130 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4131 AssertRCReturn(rc, rc);
4132
4133 pVCpu->hm.s.fUsingHyperDR7 = true;
4134 fInterceptDB = true;
4135 fInterceptMovDRx = true;
4136 }
4137 else
4138 {
4139 /*
4140 * If the guest has enabled debug registers, we need to load them prior to
4141 * executing guest code so they'll trigger at the right time.
4142 */
4143 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
4144 {
4145#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4146 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4147 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4148 {
4149 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4150 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4151 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4152 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4153 }
4154 else
4155#endif
4156 if (!CPUMIsGuestDebugStateActive(pVCpu))
4157 {
4158 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4159 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4160 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4161 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4162 }
4163 Assert(!fInterceptDB);
4164 Assert(!fInterceptMovDRx);
4165 }
4166 /*
4167         * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4168 * must intercept #DB in order to maintain a correct DR6 guest value.
4169 */
4170#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4171 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4172 && !CPUMIsGuestDebugStateActive(pVCpu))
4173#else
4174 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4175#endif
4176 {
4177 fInterceptMovDRx = true;
4178 fInterceptDB = true;
4179 }
4180
4181 /* Update guest DR7. */
4182 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
4183 AssertRCReturn(rc, rc);
4184
4185 pVCpu->hm.s.fUsingHyperDR7 = false;
4186 }
4187
4188 /*
4189 * Update the exception bitmap regarding intercepting #DB generated by the guest.
4190 */
4191 if (fInterceptDB)
4192 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
4193 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4194 {
4195#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
4196 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
4197#endif
4198 }
4199 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
4200 AssertRCReturn(rc, rc);
4201
4202 /*
4203 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
4204 */
4205 if (fInterceptMovDRx)
4206 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4207 else
4208 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4209 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4210 AssertRCReturn(rc, rc);
4211
4212 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4213 return VINF_SUCCESS;
4214}
4215
4216
4217#ifdef VBOX_STRICT
4218/**
4219 * Strict function to validate segment registers.
4220 *
4221 * @remarks ASSUMES CR0 is up to date.
4222 */
4223static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4224{
4225 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4226 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
4227 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
4228 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4229 && ( !CPUMIsGuestInRealModeEx(pCtx)
4230 && !CPUMIsGuestInV86ModeEx(pCtx)))
4231 {
4232 /* Protected mode checks */
4233 /* CS */
4234 Assert(pCtx->cs.Attr.n.u1Present);
4235 Assert(!(pCtx->cs.Attr.u & 0xf00));
4236 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4237 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4238 || !(pCtx->cs.Attr.n.u1Granularity));
4239 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4240 || (pCtx->cs.Attr.n.u1Granularity));
4241 /* CS cannot be loaded with NULL in protected mode. */
4242 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
4243 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4244 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4245 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4246 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4247 else
4248            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4249 /* SS */
4250 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4251 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4252 if ( !(pCtx->cr0 & X86_CR0_PE)
4253 || pCtx->cs.Attr.n.u4Type == 3)
4254 {
4255 Assert(!pCtx->ss.Attr.n.u2Dpl);
4256 }
4257 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4258 {
4259 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4260 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4261 Assert(pCtx->ss.Attr.n.u1Present);
4262 Assert(!(pCtx->ss.Attr.u & 0xf00));
4263 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4264 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4265 || !(pCtx->ss.Attr.n.u1Granularity));
4266 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4267 || (pCtx->ss.Attr.n.u1Granularity));
4268 }
4269 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
4270 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4271 {
4272 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4273 Assert(pCtx->ds.Attr.n.u1Present);
4274 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4275 Assert(!(pCtx->ds.Attr.u & 0xf00));
4276 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4277 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4278 || !(pCtx->ds.Attr.n.u1Granularity));
4279 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4280 || (pCtx->ds.Attr.n.u1Granularity));
4281 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4282 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4283 }
4284 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4285 {
4286 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4287 Assert(pCtx->es.Attr.n.u1Present);
4288 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4289 Assert(!(pCtx->es.Attr.u & 0xf00));
4290 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4291 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4292 || !(pCtx->es.Attr.n.u1Granularity));
4293 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4294 || (pCtx->es.Attr.n.u1Granularity));
4295 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4296 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4297 }
4298 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4299 {
4300 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4301 Assert(pCtx->fs.Attr.n.u1Present);
4302 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4303 Assert(!(pCtx->fs.Attr.u & 0xf00));
4304 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4305 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4306 || !(pCtx->fs.Attr.n.u1Granularity));
4307 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4308 || (pCtx->fs.Attr.n.u1Granularity));
4309 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4310 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4311 }
4312 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4313 {
4314 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4315 Assert(pCtx->gs.Attr.n.u1Present);
4316 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4317 Assert(!(pCtx->gs.Attr.u & 0xf00));
4318 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4319 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4320 || !(pCtx->gs.Attr.n.u1Granularity));
4321 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4322 || (pCtx->gs.Attr.n.u1Granularity));
4323 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4324 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4325 }
4326 /* 64-bit capable CPUs. */
4327# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4328 if (HMVMX_IS_64BIT_HOST_MODE())
4329 {
4330 Assert(!(pCtx->cs.u64Base >> 32));
4331 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
4332 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
4333 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
4334 }
4335# endif
4336 }
4337 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4338 || ( CPUMIsGuestInRealModeEx(pCtx)
4339 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4340 {
4341 /* Real and v86 mode checks. */
4342        /* hmR0VmxWriteSegmentReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
4343 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4344 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4345 {
4346 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4347 }
4348 else
4349 {
4350 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4351 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4352 }
4353
4354 /* CS */
4355 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4356 Assert(pCtx->cs.u32Limit == 0xffff);
4357 Assert(u32CSAttr == 0xf3);
4358 /* SS */
4359 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4360 Assert(pCtx->ss.u32Limit == 0xffff);
4361 Assert(u32SSAttr == 0xf3);
4362 /* DS */
4363 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4364 Assert(pCtx->ds.u32Limit == 0xffff);
4365 Assert(u32DSAttr == 0xf3);
4366 /* ES */
4367 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4368 Assert(pCtx->es.u32Limit == 0xffff);
4369 Assert(u32ESAttr == 0xf3);
4370 /* FS */
4371 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4372 Assert(pCtx->fs.u32Limit == 0xffff);
4373 Assert(u32FSAttr == 0xf3);
4374 /* GS */
4375 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4376 Assert(pCtx->gs.u32Limit == 0xffff);
4377 Assert(u32GSAttr == 0xf3);
4378 /* 64-bit capable CPUs. */
4379# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4380 if (HMVMX_IS_64BIT_HOST_MODE())
4381 {
4382 Assert(!(pCtx->cs.u64Base >> 32));
4383 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
4384 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
4385 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
4386 }
4387# endif
4388 }
4389}
4390#endif /* VBOX_STRICT */
4391
4392
4393/**
4394 * Writes a guest segment register into the guest-state area in the VMCS.
4395 *
4396 * @returns VBox status code.
4397 * @param pVCpu Pointer to the VMCPU.
4398 * @param idxSel Index of the selector in the VMCS.
4399 * @param idxLimit Index of the segment limit in the VMCS.
4400 * @param idxBase Index of the segment base in the VMCS.
4401 * @param idxAccess Index of the access rights of the segment in the VMCS.
4402 * @param pSelReg Pointer to the segment selector.
4403 *
4404 * @remarks No-long-jump zone!!!
4405 */
4406static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4407 uint32_t idxAccess, PCPUMSELREG pSelReg)
4408{
4409 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4410 AssertRCReturn(rc, rc);
4411 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4412 AssertRCReturn(rc, rc);
4413 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4414 AssertRCReturn(rc, rc);
4415
4416 uint32_t u32Access = pSelReg->Attr.u;
4417 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4418 {
4419 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4420 u32Access = 0xf3;
4421 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4422 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4423 }
4424 else
4425 {
4426 /*
4427         * We use the segment attributes to tell a real null selector apart from a selector that was simply loaded with 0 in
4428         * real-mode. A selector loaded in real-mode with the value 0 is valid and usable in
4429         * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure NULL selectors
4430 * loaded in protected-mode have their attribute as 0.
4431 */
4432 if (!u32Access)
4433 u32Access = X86DESCATTR_UNUSABLE;
4434 }
4435
4436 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4437 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4438 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
4439
4440 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4441 AssertRCReturn(rc, rc);
4442 return rc;
4443}
4444
4445
4446/**
4447 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4448 * into the guest-state area in the VMCS.
4449 *
4450 * @returns VBox status code.
4451 * @param pVCpu Pointer to the VMCPU.
4453 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4454 * out-of-sync. Make sure to update the required fields
4455 * before using them.
4456 *
4457 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
4458 * @remarks No-long-jump zone!!!
4459 */
4460static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4461{
4462 int rc = VERR_INTERNAL_ERROR_5;
4463 PVM pVM = pVCpu->CTX_SUFF(pVM);
4464
4465 /*
4466 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4467 */
4468 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4469 {
4470 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
4471 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4472 {
4473 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4474 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4475 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4476 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4477 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4478 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4479 }
4480
4481#ifdef VBOX_WITH_REM
4482 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4483 {
4484 Assert(pVM->hm.s.vmx.pRealModeTSS);
4485 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4486 if ( pVCpu->hm.s.vmx.fWasInRealMode
4487 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4488 {
4489 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4490 in real-mode (e.g. OpenBSD 4.0) */
4491 REMFlushTBs(pVM);
4492 Log4(("Load: Switch to protected mode detected!\n"));
4493 pVCpu->hm.s.vmx.fWasInRealMode = false;
4494 }
4495 }
4496#endif
4497 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4498 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
4499 AssertRCReturn(rc, rc);
4500 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4501 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
4502 AssertRCReturn(rc, rc);
4503 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4504 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
4505 AssertRCReturn(rc, rc);
4506 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4507 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
4508 AssertRCReturn(rc, rc);
4509 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4510 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
4511 AssertRCReturn(rc, rc);
4512 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4513 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
4514 AssertRCReturn(rc, rc);
4515
4516#ifdef VBOX_STRICT
4517 /* Validate. */
4518 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4519#endif
4520
4521 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4522 Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
4523 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4524 }
4525
4526 /*
4527 * Guest TR.
4528 */
4529 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4530 {
4531 /*
4532 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
4533 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
4534 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4535 */
4536 uint16_t u16Sel = 0;
4537 uint32_t u32Limit = 0;
4538 uint64_t u64Base = 0;
4539 uint32_t u32AccessRights = 0;
4540
4541 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4542 {
4543 u16Sel = pMixedCtx->tr.Sel;
4544 u32Limit = pMixedCtx->tr.u32Limit;
4545 u64Base = pMixedCtx->tr.u64Base;
4546 u32AccessRights = pMixedCtx->tr.Attr.u;
4547 }
4548 else
4549 {
4550 Assert(pVM->hm.s.vmx.pRealModeTSS);
4551 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4552
4553 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4554 RTGCPHYS GCPhys;
4555 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4556 AssertRCReturn(rc, rc);
4557
4558 X86DESCATTR DescAttr;
4559 DescAttr.u = 0;
4560 DescAttr.n.u1Present = 1;
4561 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
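 /* VM-entry requires TR to refer to a *busy* TSS (type 3 or 11); see Intel spec. 26.3.1.2
    "Checks on Guest Segment Registers". The asserts further below verify exactly this. */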
4562
4563 u16Sel = 0;
4564 u32Limit = HM_VTX_TSS_SIZE;
4565 u64Base = GCPhys; /* in real-mode phys = virt. */
4566 u32AccessRights = DescAttr.u;
4567 }
4568
4569 /* Validate. */
4570 Assert(!(u16Sel & RT_BIT(2)));
4571 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4572 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4573 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4574 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4575 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4576 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4577 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4578 Assert( (u32Limit & 0xfff) == 0xfff
4579 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4580 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4581 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4582
4583 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
4584 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
4585 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
4586 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
4587
4588 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4589 Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
4590 }
4591
4592 /*
4593 * Guest GDTR.
4594 */
4595 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4596 {
4597 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
4598 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
4599
4600 /* Validate. */
4601 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4602
4603 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4604 Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
4605 }
4606
4607 /*
4608 * Guest LDTR.
4609 */
4610 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4611 {
4612 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
4613 uint32_t u32Access = 0;
4614 if (!pMixedCtx->ldtr.Attr.u)
4615 u32Access = X86DESCATTR_UNUSABLE;
4616 else
4617 u32Access = pMixedCtx->ldtr.Attr.u;
4618
4619 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
4620 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
4621 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
4622 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
4623
4624 /* Validate. */
4625 if (!(u32Access & X86DESCATTR_UNUSABLE))
4626 {
4627 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4628 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4629 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4630 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4631 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4632 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4633 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4634 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4635 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4636 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4637 }
4638
4639 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4640 Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
4641 }
4642
4643 /*
4644 * Guest IDTR.
4645 */
4646 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4647 {
4648 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
4649 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
4650
4651 /* Validate. */
4652 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4653
4654 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4655 Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
4656 }
4657
4658 return VINF_SUCCESS;
4659}
4660
4661
4662/**
4663 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4664 * areas. These MSRs will automatically be loaded into the CPU on every
4665 * successful VM-entry and stored back from it on every successful VM-exit.
4666 *
4667 * This also creates/updates MSR slots for the host MSRs. The actual host
4668 * MSR values are -not- updated here for performance reasons. See
4669 * hmR0VmxSaveHostMsrs().
4670 *
4671 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
4672 *
4673 * @returns VBox status code.
4674 * @param pVCpu Pointer to the VMCPU.
4675 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4676 * out-of-sync. Make sure to update the required fields
4677 * before using them.
4678 *
4679 * @remarks No-long-jump zone!!!
4680 */
4681static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4682{
4683 AssertPtr(pVCpu);
4684 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4685
4686 /*
4687 * MSRs for which we use the auto-load/store MSR area in the VMCS.
4688 */
4689 PVM pVM = pVCpu->CTX_SUFF(pVM);
4690 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4691 {
4692 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
4693#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4694 if (pVM->hm.s.fAllow64BitGuests)
4695 {
4696 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
4697 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
4698 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
4699 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
4700# ifdef DEBUG
4701 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4702 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4703 Log4(("Load: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
4704# endif
4705 }
4706#endif
4707 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4708 }
4709
4710 /*
4711 * Guest Sysenter MSRs.
4712 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4713 * VM-exits on WRMSRs for these MSRs.
4714 */
4715 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4716 {
4717 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4718 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4719 }
4720
4721 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4722 {
4723 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4724 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4725 }
4726
4727 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4728 {
4729 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4730 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4731 }
4732
4733 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4734 {
4735 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4736 {
4737 /*
4738 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4739 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4740 */
4741 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4742 {
4743 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4744 AssertRCReturn(rc, rc);
4745 Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
4746 }
4747 else
4748 {
4749 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
4750 /* We need to intercept reads too, see @bugref{7386} comment #16. */
4751 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4752 Log4(("Load: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
4753 pVCpu->hm.s.vmx.cMsrs));
4754 }
4755 }
4756 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4757 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4758 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4759 }
4760
4761 return VINF_SUCCESS;
4762}
4763
4764
4765/**
4766 * Loads the guest activity state into the guest-state area in the VMCS.
4767 *
4768 * @returns VBox status code.
4769 * @param pVCpu Pointer to the VMCPU.
4770 * @param pCtx Pointer to the guest-CPU context. The data may be
4771 * out-of-sync. Make sure to update the required fields
4772 * before using them.
4773 *
4774 * @remarks No-long-jump zone!!!
4775 */
4776static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4777{
4778 NOREF(pCtx);
4779 /** @todo See if we can make use of other states, e.g.
4780 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4781 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4782 {
4783 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4784 AssertRCReturn(rc, rc);
4785
4786 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4787 }
4788 return VINF_SUCCESS;
4789}
4790
4791
4792/**
4793 * Sets up the appropriate function to run guest code.
4794 *
4795 * @returns VBox status code.
4796 * @param pVCpu Pointer to the VMCPU.
4797 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4798 * out-of-sync. Make sure to update the required fields
4799 * before using them.
4800 *
4801 * @remarks No-long-jump zone!!!
4802 */
4803static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4804{
4805 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4806 {
4807#ifndef VBOX_ENABLE_64_BITS_GUESTS
4808 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4809#endif
4810 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4811#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4812 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4813 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4814 {
4815 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4816 {
4817 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4818 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT
4819 | HM_CHANGED_VMX_EXIT_CTLS
4820 | HM_CHANGED_VMX_ENTRY_CTLS
4821 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4822 }
4823 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4824 }
4825#else
4826 /* 64-bit host or hybrid host. */
4827 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4828#endif
4829 }
4830 else
4831 {
4832 /* Guest is not in long mode, use the 32-bit handler. */
4833#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4834 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4835 {
4836 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4837 {
4838 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4839 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT
4840 | HM_CHANGED_VMX_EXIT_CTLS
4841 | HM_CHANGED_VMX_ENTRY_CTLS
4842 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4843 }
4844 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4845 }
4846#else
4847 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4848#endif
4849 }
4850 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4851 return VINF_SUCCESS;
4852}
4853
4854
4855/**
4856 * Wrapper for running the guest code in VT-x.
4857 *
4858 * @returns VBox strict status code.
4859 * @param pVM Pointer to the VM.
4860 * @param pVCpu Pointer to the VMCPU.
4861 * @param pCtx Pointer to the guest-CPU context.
4862 *
4863 * @remarks No-long-jump zone!!!
4864 */
4865DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4866{
4867 /*
4868 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4869 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4870 * Refer to the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
4871 */
4872 const bool fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
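 /* VMLAUNCH is only valid for a VMCS in the "clear" state, while VMRESUME requires the "launched" state,
    hence the fResumeVM selection above. */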
4873 /** @todo Add stats for resume vs launch. */
4874#ifdef VBOX_WITH_KERNEL_USING_XMM
4875 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4876#else
4877 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4878#endif
4879}
4880
4881
4882/**
4883 * Reports world-switch error and dumps some useful debug info.
4884 *
4885 * @param pVM Pointer to the VM.
4886 * @param pVCpu Pointer to the VMCPU.
4887 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4888 * @param pCtx Pointer to the guest-CPU context.
4889 * @param pVmxTransient Pointer to the VMX transient structure (only
4890 * exitReason updated).
4891 */
4892static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4893{
4894 Assert(pVM);
4895 Assert(pVCpu);
4896 Assert(pCtx);
4897 Assert(pVmxTransient);
4898 HMVMX_ASSERT_PREEMPT_SAFE();
4899
4900 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4901 switch (rcVMRun)
4902 {
4903 case VERR_VMX_INVALID_VMXON_PTR:
4904 AssertFailed();
4905 break;
4906 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4907 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4908 {
4909 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4910 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4911 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4912 AssertRC(rc);
4913
4914 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4915 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4916 Cannot do it here as we may have been long preempted. */
4917
4918#ifdef VBOX_STRICT
4919 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4920 pVmxTransient->uExitReason));
4921 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4922 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4923 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4924 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4925 else
4926 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4927 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
4928 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
4929
4930 /* VMX control bits. */
4931 uint32_t u32Val;
4932 uint64_t u64Val;
4933 HMVMXHCUINTREG uHCReg;
4934 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4935 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4936 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4937 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4938 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4939 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4940 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4941 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4942 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4943 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4944 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4945 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4946 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4947 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4948 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4949 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4950 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4951 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4952 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4953 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4954 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4955 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4956 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4957 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4958 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4959 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4960 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4961 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4962 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4963 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4964 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4965 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4966 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4967 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4968 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4969 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4970 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4971 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4972 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4973 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4974 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4975 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4976
4977 /* Guest bits. */
4978 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4979 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4980 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4981 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4982 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4983 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4984 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4985 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4986
4987 /* Host bits. */
4988 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4989 Log4(("Host CR0 %#RHr\n", uHCReg));
4990 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4991 Log4(("Host CR3 %#RHr\n", uHCReg));
4992 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4993 Log4(("Host CR4 %#RHr\n", uHCReg));
4994
4995 RTGDTR HostGdtr;
4996 PCX86DESCHC pDesc;
4997 ASMGetGDTR(&HostGdtr);
4998 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4999 Log4(("Host CS %#08x\n", u32Val));
5000 if (u32Val < HostGdtr.cbGdt)
5001 {
5002 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5003 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
5004 }
5005
5006 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
5007 Log4(("Host DS %#08x\n", u32Val));
5008 if (u32Val < HostGdtr.cbGdt)
5009 {
5010 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5011 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
5012 }
5013
5014 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
5015 Log4(("Host ES %#08x\n", u32Val));
5016 if (u32Val < HostGdtr.cbGdt)
5017 {
5018 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5019 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
5020 }
5021
5022 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
5023 Log4(("Host FS %#08x\n", u32Val));
5024 if (u32Val < HostGdtr.cbGdt)
5025 {
5026 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5027 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
5028 }
5029
5030 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
5031 Log4(("Host GS %#08x\n", u32Val));
5032 if (u32Val < HostGdtr.cbGdt)
5033 {
5034 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5035 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
5036 }
5037
5038 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
5039 Log4(("Host SS %#08x\n", u32Val));
5040 if (u32Val < HostGdtr.cbGdt)
5041 {
5042 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5043 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
5044 }
5045
5046 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
5047 Log4(("Host TR %#08x\n", u32Val));
5048 if (u32Val < HostGdtr.cbGdt)
5049 {
5050 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5051 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
5052 }
5053
5054 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5055 Log4(("Host TR Base %#RHv\n", uHCReg));
5056 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5057 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5058 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5059 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5060 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5061 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5062 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5063 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5064 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5065 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5066 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5067 Log4(("Host RSP %#RHv\n", uHCReg));
5068 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5069 Log4(("Host RIP %#RHv\n", uHCReg));
5070# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5071 if (HMVMX_IS_64BIT_HOST_MODE())
5072 {
5073 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5074 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5075 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5076 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5077 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5078 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5079 }
5080# endif
5081#endif /* VBOX_STRICT */
5082 break;
5083 }
5084
5085 default:
5086 /* Impossible */
5087 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5088 break;
5089 }
5090 NOREF(pVM); NOREF(pCtx);
5091}
5092
5093
5094#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5095#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5096# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5097#endif
5098#ifdef VBOX_STRICT
5099static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5100{
5101 switch (idxField)
5102 {
5103 case VMX_VMCS_GUEST_RIP:
5104 case VMX_VMCS_GUEST_RSP:
5105 case VMX_VMCS_GUEST_SYSENTER_EIP:
5106 case VMX_VMCS_GUEST_SYSENTER_ESP:
5107 case VMX_VMCS_GUEST_GDTR_BASE:
5108 case VMX_VMCS_GUEST_IDTR_BASE:
5109 case VMX_VMCS_GUEST_CS_BASE:
5110 case VMX_VMCS_GUEST_DS_BASE:
5111 case VMX_VMCS_GUEST_ES_BASE:
5112 case VMX_VMCS_GUEST_FS_BASE:
5113 case VMX_VMCS_GUEST_GS_BASE:
5114 case VMX_VMCS_GUEST_SS_BASE:
5115 case VMX_VMCS_GUEST_LDTR_BASE:
5116 case VMX_VMCS_GUEST_TR_BASE:
5117 case VMX_VMCS_GUEST_CR3:
5118 return true;
5119 }
5120 return false;
5121}
5122
5123static bool hmR0VmxIsValidReadField(uint32_t idxField)
5124{
5125 switch (idxField)
5126 {
5127 /* Read-only fields. */
5128 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5129 return true;
5130 }
5131 /* Remaining readable fields should also be writable. */
5132 return hmR0VmxIsValidWriteField(idxField);
5133}
5134#endif /* VBOX_STRICT */
5135
5136
5137/**
5138 * Executes the specified handler in 64-bit mode.
5139 *
5140 * @returns VBox status code.
5141 * @param pVM Pointer to the VM.
5142 * @param pVCpu Pointer to the VMCPU.
5143 * @param pCtx Pointer to the guest CPU context.
5144 * @param enmOp The operation to perform.
5145 * @param cbParam Number of parameters.
5146 * @param paParam Array of 32-bit parameters.
5147 */
5148VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
5149 uint32_t *paParam)
5150{
5151 int rc, rc2;
5152 PHMGLOBALCPUINFO pCpu;
5153 RTHCPHYS HCPhysCpuPage;
5154 RTCCUINTREG uOldEflags;
5155
5156 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5157 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5158 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5159 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5160
5161#ifdef VBOX_STRICT
5162 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5163 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5164
5165 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5166 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5167#endif
5168
5169 /* Disable interrupts. */
5170 uOldEflags = ASMIntDisableFlags();
5171
5172#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5173 RTCPUID idHostCpu = RTMpCpuId();
5174 CPUMR0SetLApic(pVCpu, idHostCpu);
5175#endif
5176
5177 pCpu = HMR0GetCurrentCpu();
5178 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5179
5180 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5181 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5182
5183 /* Leave VMX Root Mode. */
5184 VMXDisable();
5185
5186 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5187
5188 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5189 CPUMSetHyperEIP(pVCpu, enmOp);
5190 for (int i = (int)cbParam - 1; i >= 0; i--)
5191 CPUMPushHyper(pVCpu, paParam[i]);
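 /* Parameters are pushed in reverse order so that paParam[0] ends up at the top of the hypervisor stack. */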
5192
5193 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5194
5195 /* Call the switcher. */
5196 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5197 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5198
5199 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5200 /* Make sure the VMX instructions don't cause #UD faults. */
5201 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
5202
5203 /* Re-enter VMX Root Mode */
5204 rc2 = VMXEnable(HCPhysCpuPage);
5205 if (RT_FAILURE(rc2))
5206 {
5207 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5208 ASMSetFlags(uOldEflags);
5209 return rc2;
5210 }
5211
5212 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5213 AssertRC(rc2);
5214 Assert(!(ASMGetFlags() & X86_EFL_IF));
5215 ASMSetFlags(uOldEflags);
5216 return rc;
5217}
5218
5219
5220/**
5221 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5222 * supporting 64-bit guests.
5223 *
5224 * @returns VBox status code.
5225 * @param fResume Whether to VMLAUNCH or VMRESUME.
5226 * @param pCtx Pointer to the guest-CPU context.
5227 * @param pCache Pointer to the VMCS cache.
5228 * @param pVM Pointer to the VM.
5229 * @param pVCpu Pointer to the VMCPU.
5230 */
5231DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5232{
5233 uint32_t aParam[6];
5234 PHMGLOBALCPUINFO pCpu = NULL;
5235 RTHCPHYS HCPhysCpuPage = 0;
5236 int rc = VERR_INTERNAL_ERROR_5;
5237
5238 pCpu = HMR0GetCurrentCpu();
5239 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5240
5241#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5242 pCache->uPos = 1;
5243 pCache->interPD = PGMGetInterPaeCR3(pVM);
5244 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5245#endif
5246
5247#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5248 pCache->TestIn.HCPhysCpuPage = 0;
5249 pCache->TestIn.HCPhysVmcs = 0;
5250 pCache->TestIn.pCache = 0;
5251 pCache->TestOut.HCPhysVmcs = 0;
5252 pCache->TestOut.pCache = 0;
5253 pCache->TestOut.pCtx = 0;
5254 pCache->TestOut.eflags = 0;
5255#endif
5256
5257 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5258 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5259 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5260 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
5261 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5262 aParam[5] = 0;
5263
5264#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5265 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5266 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5267#endif
5268 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
5269
5270#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5271 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5272 Assert(pCtx->dr[4] == 10);
5273 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5274#endif
5275
5276#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5277 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5278 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5279 pVCpu->hm.s.vmx.HCPhysVmcs));
5280 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5281 pCache->TestOut.HCPhysVmcs));
5282 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5283 pCache->TestOut.pCache));
5284 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5285 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5286 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5287 pCache->TestOut.pCtx));
5288 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5289#endif
5290 return rc;
5291}
5292
5293
5294/**
5295 * Initialize the VMCS-Read cache. The VMCS cache is used for 32-bit hosts
5296 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
5297 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
5298 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
5299 *
5300 * @returns VBox status code.
5301 * @param pVM Pointer to the VM.
5302 * @param pVCpu Pointer to the VMCPU.
5303 */
5304static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5305{
5306#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5307{ \
5308 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5309 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5310 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5311 ++cReadFields; \
5312}
5313
5314 AssertPtr(pVM);
5315 AssertPtr(pVCpu);
5316 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5317 uint32_t cReadFields = 0;
5318
5319 /*
5320 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5321 * and serve to indicate exceptions to the rules.
5322 */
5323
5324 /* Guest-natural selector base fields. */
5325#if 0
5326 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5327 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5328 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5329#endif
5330 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5331 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5332 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5333 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5334 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5335 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5336 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5337 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5338 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5339 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5340 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5341 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5342#if 0
5343 /* Unused natural width guest-state fields. */
5344 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5345 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5346#endif
5347 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5348 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5349
5350 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
5351#if 0
5352 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5353 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5354 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5355 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5356 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5357 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5358 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5359 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5360 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5361#endif
5362
5363 /* Natural width guest-state fields. */
5364 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5365#if 0
5366 /* Currently unused field. */
5367 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5368#endif
5369
5370 if (pVM->hm.s.fNestedPaging)
5371 {
5372 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5373 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5374 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5375 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5376 }
5377 else
5378 {
5379 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5380 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5381 }
5382
5383#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5384 return VINF_SUCCESS;
5385}
5386
5387
5388/**
5389 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5390 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5391 * darwin, running 64-bit guests).
5392 *
5393 * @returns VBox status code.
5394 * @param pVCpu Pointer to the VMCPU.
5395 * @param idxField The VMCS field encoding.
5396 * @param u64Val 16, 32 or 64-bit value.
5397 */
5398VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5399{
5400 int rc;
5401 switch (idxField)
5402 {
5403 /*
5404 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5405 */
5406 /* 64-bit Control fields. */
5407 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5408 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5409 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5410 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5411 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5412 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5413 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5414 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5415 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5416 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5417 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5418 case VMX_VMCS64_CTRL_EPTP_FULL:
5419 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5420 /* 64-bit Guest-state fields. */
5421 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5422 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5423 case VMX_VMCS64_GUEST_PAT_FULL:
5424 case VMX_VMCS64_GUEST_EFER_FULL:
5425 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5426 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5427 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5428 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5429 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5430 /* 64-bit Host-state fields. */
5431 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
5432 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
5433 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5434 {
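 /* For 64-bit VMCS fields the "FULL" encoding is even and the "HIGH" half uses encoding FULL+1,
    which is why the upper dword can be written at idxField + 1. */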
5435 rc = VMXWriteVmcs32(idxField, u64Val);
5436 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
5437 break;
5438 }
5439
5440 /*
5441 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5442 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
5443 */
5444 /* Natural-width Guest-state fields. */
5445 case VMX_VMCS_GUEST_CR3:
5446 case VMX_VMCS_GUEST_ES_BASE:
5447 case VMX_VMCS_GUEST_CS_BASE:
5448 case VMX_VMCS_GUEST_SS_BASE:
5449 case VMX_VMCS_GUEST_DS_BASE:
5450 case VMX_VMCS_GUEST_FS_BASE:
5451 case VMX_VMCS_GUEST_GS_BASE:
5452 case VMX_VMCS_GUEST_LDTR_BASE:
5453 case VMX_VMCS_GUEST_TR_BASE:
5454 case VMX_VMCS_GUEST_GDTR_BASE:
5455 case VMX_VMCS_GUEST_IDTR_BASE:
5456 case VMX_VMCS_GUEST_RSP:
5457 case VMX_VMCS_GUEST_RIP:
5458 case VMX_VMCS_GUEST_SYSENTER_ESP:
5459 case VMX_VMCS_GUEST_SYSENTER_EIP:
5460 {
5461 if (!(u64Val >> 32))
5462 {
5463 /* If this field is 64-bit, VT-x will zero out the top bits. */
5464 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5465 }
5466 else
5467 {
5468 /* Assert that only the 32->64 switcher case should ever come here. */
5469 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5470 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5471 }
5472 break;
5473 }
5474
5475 default:
5476 {
5477 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5478 rc = VERR_INVALID_PARAMETER;
5479 break;
5480 }
5481 }
5482 AssertRCReturn(rc, rc);
5483 return rc;
5484}
5485
5486
5487/**
5488 * Queue up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
5489 * hosts (except darwin) for 64-bit guests.
5490 *
 * @returns VBox status code.
5491 * @param pVCpu Pointer to the VMCPU.
5492 * @param idxField The VMCS field encoding.
5493 * @param u64Val 16, 32 or 64-bit value.
5494 */
5495VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5496{
5497 AssertPtr(pVCpu);
5498 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5499
5500 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5501 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5502
5503 /* Make sure there are no duplicates. */
5504 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5505 {
5506 if (pCache->Write.aField[i] == idxField)
5507 {
5508 pCache->Write.aFieldVal[i] = u64Val;
5509 return VINF_SUCCESS;
5510 }
5511 }
5512
5513 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5514 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5515 pCache->Write.cValidEntries++;
5516 return VINF_SUCCESS;
5517}
5518
5519/* Enable later when the assembly code uses these as callbacks. */
5520#if 0
5521/*
5522 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
5523 *
5524 * @param pVCpu Pointer to the VMCPU.
5525 * @param pCache Pointer to the VMCS cache.
5526 *
5527 * @remarks No-long-jump zone!!!
5528 */
5529VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
5530{
5531 AssertPtr(pCache);
5532 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5533 {
5534 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
5535 AssertRC(rc);
5536 }
5537 pCache->Write.cValidEntries = 0;
5538}
5539
5540
5541/**
5542 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
5543 *
5544 * @param pVCpu Pointer to the VMCPU.
5545 * @param pCache Pointer to the VMCS cache.
5546 *
5547 * @remarks No-long-jump zone!!!
5548 */
5549VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
5550{
5551 AssertPtr(pCache);
5552 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
5553 {
5554 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
5555 AssertRC(rc);
5556 }
5557}
5558#endif
5559#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
5560
5561
5562/**
5563 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
5564 * not possible, cause VM-exits on RDTSC(P)s. Also sets up the VMX preemption
5565 * timer.
5566 *
5568 * @param pVCpu Pointer to the VMCPU.
5569 *
5570 * @remarks No-long-jump zone!!!
5571 */
5572static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
5573{
5574 int rc = VERR_INTERNAL_ERROR_5;
5575 bool fOffsettedTsc = false;
5576 bool fParavirtTsc = false;
5577 PVM pVM = pVCpu->CTX_SUFF(pVM);
5578 if (pVM->hm.s.vmx.fUsePreemptTimer)
5579 {
5580 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &fParavirtTsc,
5581 &pVCpu->hm.s.vmx.u64TSCOffset);
5582
5583 /* Make sure the returned values have sane upper and lower boundaries. */
5584 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
5585 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5586 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
5587 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
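 /* The VMX-preemption timer counts down at the TSC rate divided by 2^cPreemptTimerShift (the shift is
    reported in MSR IA32_VMX_MISC[4:0]), so convert the TSC-tick deadline into preemption-timer ticks. */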
5588
5589 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5590 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
5591 }
5592 else
5593 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5594
5595#if 1
5596 if (fParavirtTsc)
5597 {
5598#if 1
5599 uint64_t const u64CurTsc = ASMReadTSC();
5600 uint64_t const u64LastTick = TMCpuTickGetLastSeen(pVCpu);
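 /* If applying the current offset would let the guest observe a TSC lower than the last value it was given,
    bump the offset so the virtual TSC never appears to go backwards. */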
5601 if (u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset < u64LastTick)
5602 {
5603 pVCpu->hm.s.vmx.u64TSCOffset = (u64LastTick - u64CurTsc);
5604 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffsetAdjusted);
5605 }
5606
5607 Assert(u64CurTsc + pVCpu->hm.s.vmx.u64TSCOffset >= u64LastTick);
5608#endif
5609 rc = GIMR0UpdateParavirtTsc(pVM, pVCpu->hm.s.vmx.u64TSCOffset);
5610 AssertRC(rc);
5611 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5612 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRC(rc);
5613
5614 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5615 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5616 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5617 }
5618 else
5619#else
5620 if (fParavirtTsc)
5621 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5622#endif
5623 if (fOffsettedTsc)
5624 {
5625 uint64_t u64CurTSC = ASMReadTSC();
5626 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
5627 {
5628 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5629 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5630
5631 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5632 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5633 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5634 }
5635 else
5636 {
5637 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
5638 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5639 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5640 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
5641 }
5642 }
5643 else
5644 {
5645 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5646 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5647 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5648 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5649 }
5650}
5651
5652
5653/**
5654 * Determines if an exception is a contributory exception. Contributory
5655 * exceptions are ones which can cause double-faults. Page-fault is
5656 * intentionally not included here as it's a conditional contributory exception.
5657 *
5658 * @returns true if the exception is contributory, false otherwise.
5659 * @param uVector The exception vector.
5660 */
5661DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
5662{
5663 switch (uVector)
5664 {
5665 case X86_XCPT_GP:
5666 case X86_XCPT_SS:
5667 case X86_XCPT_NP:
5668 case X86_XCPT_TS:
5669 case X86_XCPT_DE:
5670 return true;
5671 default:
5672 break;
5673 }
5674 return false;
5675}
5676
5677
5678/**
5679 * Sets an event as a pending event to be injected into the guest.
5680 *
5681 * @param pVCpu Pointer to the VMCPU.
5682 * @param u32IntInfo The VM-entry interruption-information field.
5683 * @param cbInstr The VM-entry instruction length in bytes (for software
5684 * interrupts, exceptions and privileged software
5685 * exceptions).
5686 * @param u32ErrCode The VM-entry exception error code.
5687 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5688 * page-fault.
5689 *
5690 * @remarks Statistics counter assumes this is a guest event being injected or
5691 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5692 * always incremented.
5693 */
5694DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5695 RTGCUINTPTR GCPtrFaultAddress)
5696{
5697 Assert(!pVCpu->hm.s.Event.fPending);
5698 pVCpu->hm.s.Event.fPending = true;
5699 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5700 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5701 pVCpu->hm.s.Event.cbInstr = cbInstr;
5702 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5703
5704 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5705}
5706
5707
5708/**
5709 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
5710 *
5711 * @param pVCpu Pointer to the VMCPU.
5712 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5713 * out-of-sync. Make sure to update the required fields
5714 * before using them.
5715 */
5716DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5717{
5718 NOREF(pMixedCtx);
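 /* VM-entry interruption-information layout: vector in bits 7:0, type in bits 10:8, deliver-error-code in
    bit 11 and the valid bit in bit 31; assembled below for a #DF with a (zero) error code. */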
5719 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5720 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5721 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5722 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5723}
5724
5725
5726/**
5727 * Handle a condition that occurred while delivering an event through the guest
5728 * IDT.
5729 *
5730 * @returns VBox status code (informational error codes included).
5731 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5732 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
5733 * continue execution of the guest, which will deliver the #DF.
5734 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5735 *
5736 * @param pVCpu Pointer to the VMCPU.
5737 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5738 * out-of-sync. Make sure to update the required fields
5739 * before using them.
5740 * @param pVmxTransient Pointer to the VMX transient structure.
5741 *
5742 * @remarks No-long-jump zone!!!
5743 */
5744static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5745{
5746 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5747 AssertRCReturn(rc, rc);
5748 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5749 {
5750 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5751 AssertRCReturn(rc, rc);
5752
5753 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5754 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5755 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5756
5757 typedef enum
5758 {
5759 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5760 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5761 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5762 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
5763 } VMXREFLECTXCPT;
5764
5765 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
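 /* In short: a contributory exception during delivery of another contributory exception, or a #PF during
    delivery of a contributory exception or of another #PF, yields a #DF; a fault during delivery of a #DF
    is a triple fault. */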
5766 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
5767 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5768 {
5769 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
5770 {
5771 enmReflect = VMXREFLECTXCPT_XCPT;
5772#ifdef VBOX_STRICT
5773 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
5774 && uExitVector == X86_XCPT_PF)
5775 {
5776 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5777 }
5778#endif
5779 if ( uExitVector == X86_XCPT_PF
5780 && uIdtVector == X86_XCPT_PF)
5781 {
5782 pVmxTransient->fVectoringPF = true;
5783 Log4(("IDT: vcpu[%RU32] Vectoring #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5784 }
5785 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5786 && hmR0VmxIsContributoryXcpt(uExitVector)
5787 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
5788 || uIdtVector == X86_XCPT_PF))
5789 {
5790 enmReflect = VMXREFLECTXCPT_DF;
5791 }
5792 else if (uIdtVector == X86_XCPT_DF)
5793 enmReflect = VMXREFLECTXCPT_TF;
5794 }
5795 else if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5796 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5797 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5798 {
5799 /*
5800 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
5801 * privileged software exceptions (#DB from ICEBP) as they re-occur when restarting the instruction.
5802 */
5803 enmReflect = VMXREFLECTXCPT_XCPT;
5804 }
5805 }
5806 else
5807 {
5808 /*
5809 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5810 * interruption-information will not be valid and we end up here. In such cases, it is sufficient to reflect the
5811 * original exception to the guest after handling the VM-exit.
5812 */
5813 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5814 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5815 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5816 {
5817 enmReflect = VMXREFLECTXCPT_XCPT;
5818 }
5819 }
5820
5821 switch (enmReflect)
5822 {
5823 case VMXREFLECTXCPT_XCPT:
5824 {
5825 Assert( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5826 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5827 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5828
5829 uint32_t u32ErrCode = 0;
5830 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5831 {
5832 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5833 AssertRCReturn(rc, rc);
5834 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5835 }
5836
5837 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5838 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5839 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5840 rc = VINF_SUCCESS;
5841 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5842 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5843
5844 break;
5845 }
5846
5847 case VMXREFLECTXCPT_DF:
5848 {
5849 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5850 rc = VINF_HM_DOUBLE_FAULT;
5851 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5852 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5853
5854 break;
5855 }
5856
5857 case VMXREFLECTXCPT_TF:
5858 {
5859 rc = VINF_EM_RESET;
5860 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5861 uExitVector));
5862 break;
5863 }
5864
5865 default:
5866 Assert(rc == VINF_SUCCESS);
5867 break;
5868 }
5869 }
5870 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5871 return rc;
5872}
5873
5874
5875/**
5876 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5877 *
5878 * @returns VBox status code.
5879 * @param pVCpu Pointer to the VMCPU.
5880 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5881 * out-of-sync. Make sure to update the required fields
5882 * before using them.
5883 *
5884 * @remarks No-long-jump zone!!!
5885 */
5886static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5887{
5888 NOREF(pMixedCtx);
5889
5890 /*
5891 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
5892 * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
5893 */
5894 VMMRZCallRing3Disable(pVCpu);
5895 HM_DISABLE_PREEMPT_IF_NEEDED();
5896
5897 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
5898 {
5899 uint32_t uVal = 0;
5900 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5901 AssertRCReturn(rc, rc);
5902
5903 uint32_t uShadow = 0;
5904 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5905 AssertRCReturn(rc, rc);
5906
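 /* Bits set in the CR0 guest/host mask are owned by the host: the guest sees the read-shadow value
 for those, while the remaining bits come straight from the VMCS guest CR0. */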
5907 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
5908 CPUMSetGuestCR0(pVCpu, uVal);
5909 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
5910 }
5911
5912 HM_RESTORE_PREEMPT_IF_NEEDED();
5913 VMMRZCallRing3Enable(pVCpu);
5914 return VINF_SUCCESS;
5915}
5916
5917
5918/**
5919 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5920 *
5921 * @returns VBox status code.
5922 * @param pVCpu Pointer to the VMCPU.
5923 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5924 * out-of-sync. Make sure to update the required fields
5925 * before using them.
5926 *
5927 * @remarks No-long-jump zone!!!
5928 */
5929static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5930{
5931 NOREF(pMixedCtx);
5932
5933 int rc = VINF_SUCCESS;
5934 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
5935 {
5936 uint32_t uVal = 0;
5937 uint32_t uShadow = 0;
5938 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5939 AssertRCReturn(rc, rc);
5940 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5941 AssertRCReturn(rc, rc);
5942
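 /* As with CR0: host-owned bits (per the CR4 guest/host mask) come from the read shadow, the rest from the VMCS value. */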
5943 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5944 CPUMSetGuestCR4(pVCpu, uVal);
5945 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
5946 }
5947 return rc;
5948}
5949
5950
5951/**
5952 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5953 *
5954 * @returns VBox status code.
5955 * @param pVCpu Pointer to the VMCPU.
5956 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5957 * out-of-sync. Make sure to update the required fields
5958 * before using them.
5959 *
5960 * @remarks No-long-jump zone!!!
5961 */
5962static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5963{
5964 int rc = VINF_SUCCESS;
5965 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
5966 {
5967 uint64_t u64Val = 0;
5968 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
5969 AssertRCReturn(rc, rc);
5970
5971 pMixedCtx->rip = u64Val;
5972 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
5973 }
5974 return rc;
5975}
5976
5977
5978/**
5979 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
5980 *
5981 * @returns VBox status code.
5982 * @param pVCpu Pointer to the VMCPU.
5983 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5984 * out-of-sync. Make sure to update the required fields
5985 * before using them.
5986 *
5987 * @remarks No-long-jump zone!!!
5988 */
5989static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5990{
5991 int rc = VINF_SUCCESS;
5992 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
5993 {
5994 uint64_t u64Val = 0;
5995 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
5996 AssertRCReturn(rc, rc);
5997
5998 pMixedCtx->rsp = u64Val;
5999 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
6000 }
6001 return rc;
6002}
6003
6004
6005/**
6006 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
6007 *
6008 * @returns VBox status code.
6009 * @param pVCpu Pointer to the VMCPU.
6010 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6011 * out-of-sync. Make sure to update the required fields
6012 * before using them.
6013 *
6014 * @remarks No-long-jump zone!!!
6015 */
6016static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6017{
6018 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
6019 {
6020 uint32_t uVal = 0;
6021 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
6022 AssertRCReturn(rc, rc);
6023
6024 pMixedCtx->eflags.u32 = uVal;
6025 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
6026 {
6027 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
6028 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
6029
6030 pMixedCtx->eflags.Bits.u1VM = 0;
6031 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6032 }
6033
6034 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
6035 }
6036 return VINF_SUCCESS;
6037}
6038
6039
6040/**
6041 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
6042 * guest-CPU context.
6043 */
6044DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6045{
6046 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6047 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6048 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6049 return rc;
6050}
6051
6052
6053/**
6054 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
6055 * from the guest-state area in the VMCS.
6056 *
6057 * @param pVCpu Pointer to the VMCPU.
6058 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6059 * out-of-sync. Make sure to update the required fields
6060 * before using them.
6061 *
6062 * @remarks No-long-jump zone!!!
6063 */
6064static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6065{
6066 uint32_t uIntrState = 0;
6067 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
6068 AssertRC(rc);
6069
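 /* A non-zero interruptibility-state means the guest is in an interrupt shadow (blocking by STI or MOV SS);
 record the RIP at which the shadow applies so EM can honour it. */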
6070 if (!uIntrState)
6071 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6072 else
6073 {
6074 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
6075 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6076 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6077 AssertRC(rc);
6078 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
6079 AssertRC(rc);
6080
6081 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
6082 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6083 }
6084}
6085
6086
6087/**
6088 * Saves the guest's activity state.
6089 *
6090 * @returns VBox status code.
6091 * @param pVCpu Pointer to the VMCPU.
6092 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6093 * out-of-sync. Make sure to update the required fields
6094 * before using them.
6095 *
6096 * @remarks No-long-jump zone!!!
6097 */
6098static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6099{
6100 NOREF(pMixedCtx);
6101 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
6102 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6103 return VINF_SUCCESS;
6104}
6105
6106
6107/**
6108 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
6109 * the current VMCS into the guest-CPU context.
6110 *
6111 * @returns VBox status code.
6112 * @param pVCpu Pointer to the VMCPU.
6113 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6114 * out-of-sync. Make sure to update the required fields
6115 * before using them.
6116 *
6117 * @remarks No-long-jump zone!!!
6118 */
6119static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6120{
6121 int rc = VINF_SUCCESS;
6122 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6123 {
6124 uint32_t u32Val = 0;
6125 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
6126 pMixedCtx->SysEnter.cs = u32Val;
6127 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6128 }
6129
6130 uint64_t u64Val = 0;
6131 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6132 {
6133 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
6134 pMixedCtx->SysEnter.eip = u64Val;
6135 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6136 }
6137 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6138 {
6139 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
6140 pMixedCtx->SysEnter.esp = u64Val;
6141 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6142 }
6143 return rc;
6144}
6145
6146
6147/**
6148 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
6149 * the CPU back into the guest-CPU context.
6150 *
6151 * @returns VBox status code.
6152 * @param pVCpu Pointer to the VMCPU.
6153 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6154 * out-of-sync. Make sure to update the required fields
6155 * before using them.
6156 *
6157 * @remarks No-long-jump zone!!!
6158 */
6159static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6160{
6161#if HC_ARCH_BITS == 64
6162 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6163 {
6164 /* Since this can be called from our preemption hook, it's safer to make the guest-MSR update non-preemptible. */
6165 VMMRZCallRing3Disable(pVCpu);
6166 HM_DISABLE_PREEMPT_IF_NEEDED();
6167
6168 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
6169 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6170 {
6171 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6172 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6173 }
6174
6175 HM_RESTORE_PREEMPT_IF_NEEDED();
6176 VMMRZCallRing3Enable(pVCpu);
6177 }
6178 else
6179 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6180#else
6181 NOREF(pMixedCtx);
6182 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6183#endif
6184
6185 return VINF_SUCCESS;
6186}
6187
6188
6189/**
6190 * Saves the auto load/store'd guest MSRs from the current VMCS into
6191 * the guest-CPU context.
6192 *
6193 * @returns VBox status code.
6194 * @param pVCpu Pointer to the VMCPU.
6195 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6196 * out-of-sync. Make sure to update the required fields
6197 * before using them.
6198 *
6199 * @remarks No-long-jump zone!!!
6200 */
6201static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6202{
6203 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6204 return VINF_SUCCESS;
6205
6206 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6207 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6208 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
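 /* The CPU writes these MSRs into the auto-load/store area on every VM-exit; copy each entry we know about back into the guest-CPU context. */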
6209 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6210 {
6211 switch (pMsr->u32Msr)
6212 {
6213 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6214 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
6215 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
6216 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
6217 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6218 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
6219 break;
6220
6221 default:
6222 {
6223 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
6224 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6225 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6226 }
6227 }
6228 }
6229
6230 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6231 return VINF_SUCCESS;
6232}
6233
6234
6235/**
6236 * Saves the guest control registers from the current VMCS into the guest-CPU
6237 * context.
6238 *
6239 * @returns VBox status code.
6240 * @param pVCpu Pointer to the VMCPU.
6241 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6242 * out-of-sync. Make sure to update the required fields
6243 * before using them.
6244 *
6245 * @remarks No-long-jump zone!!!
6246 */
6247static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6248{
6249 /* Guest CR0. Guest FPU. */
6250 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6251 AssertRCReturn(rc, rc);
6252
6253 /* Guest CR4. */
6254 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6255 AssertRCReturn(rc, rc);
6256
6257 /* Guest CR2 - always updated during the world-switch or in #PF. */
6258 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
6259 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6260 {
6261 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6262 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6263
6264 PVM pVM = pVCpu->CTX_SUFF(pVM);
6265 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6266 || ( pVM->hm.s.fNestedPaging
6267 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
6268 {
6269 uint64_t u64Val = 0;
6270 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6271 if (pMixedCtx->cr3 != u64Val)
6272 {
6273 CPUMSetGuestCR3(pVCpu, u64Val);
6274 if (VMMRZCallRing3IsEnabled(pVCpu))
6275 {
6276 PGMUpdateCR3(pVCpu, u64Val);
6277 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6278 }
6279 else
6280 {
6281 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3(). */
6282 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6283 }
6284 }
6285
6286 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
6287 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
6288 {
6289 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
6290 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
6291 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
6292 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
6293
6294 if (VMMRZCallRing3IsEnabled(pVCpu))
6295 {
6296 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6297 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6298 }
6299 else
6300 {
6301 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
6302 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6303 }
6304 }
6305 }
6306
6307 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6308 }
6309
6310 /*
6311 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6312 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6313 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
6314 *
6315 * The reason for such complicated handling is that VM-exits which call into PGM expect CR3 to be up-to-date, and thus
6316 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6317 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6318 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
6319 *
6320 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6321 */
6322 if (VMMRZCallRing3IsEnabled(pVCpu))
6323 {
6324 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6325 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6326
6327 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6328 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6329
6330 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6331 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6332 }
6333
6334 return rc;
6335}
6336
6337
6338/**
6339 * Reads a guest segment register from the current VMCS into the guest-CPU
6340 * context.
6341 *
6342 * @returns VBox status code.
6343 * @param pVCpu Pointer to the VMCPU.
6344 * @param idxSel Index of the selector in the VMCS.
6345 * @param idxLimit Index of the segment limit in the VMCS.
6346 * @param idxBase Index of the segment base in the VMCS.
6347 * @param idxAccess Index of the access rights of the segment in the VMCS.
6348 * @param pSelReg Pointer to the segment selector.
6349 *
6350 * @remarks No-long-jump zone!!!
6351 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
6352 * macro as that takes care of whether to read from the VMCS cache or
6353 * not.
6354 */
6355DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6356 PCPUMSELREG pSelReg)
6357{
6358 NOREF(pVCpu);
6359
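 /* The selector, limit, base and access rights live in separate VMCS fields; read each and mark the hidden selector parts as valid. */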
6360 uint32_t u32Val = 0;
6361 int rc = VMXReadVmcs32(idxSel, &u32Val);
6362 AssertRCReturn(rc, rc);
6363 pSelReg->Sel = (uint16_t)u32Val;
6364 pSelReg->ValidSel = (uint16_t)u32Val;
6365 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6366
6367 rc = VMXReadVmcs32(idxLimit, &u32Val);
6368 AssertRCReturn(rc, rc);
6369 pSelReg->u32Limit = u32Val;
6370
6371 uint64_t u64Val = 0;
6372 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
6373 AssertRCReturn(rc, rc);
6374 pSelReg->u64Base = u64Val;
6375
6376 rc = VMXReadVmcs32(idxAccess, &u32Val);
6377 AssertRCReturn(rc, rc);
6378 pSelReg->Attr.u = u32Val;
6379
6380 /*
6381 * If VT-x marks the segment as unusable, most other bits remain undefined:
6382 * - For CS the L, D and G bits have meaning.
6383 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6384 * - For the remaining data segments no bits are defined.
6385 *
6386 * The present bit and the unusable bit have been observed to be set at the
6387 * same time (the selector was supposed to be invalid as we started executing
6388 * a V8086 interrupt in ring-0).
6389 *
6390 * What should be important for the rest of the VBox code is that the P bit is
6391 * cleared. Some of the other VBox code recognizes the unusable bit, but
6392 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6393 * safe side here, we'll strip off P and other bits we don't care about. If
6394 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6395 *
6396 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6397 */
6398 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6399 {
6400 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
6401
6402 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6403 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6404 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6405
6406 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
6407#ifdef DEBUG_bird
6408 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
6409 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6410 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6411#endif
6412 }
6413 return VINF_SUCCESS;
6414}
6415
6416
6417#ifdef VMX_USE_CACHED_VMCS_ACCESSES
6418# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6419 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6420 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6421#else
6422# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6423 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6424 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6425#endif
6426
6427
6428/**
6429 * Saves the guest segment registers from the current VMCS into the guest-CPU
6430 * context.
6431 *
6432 * @returns VBox status code.
6433 * @param pVCpu Pointer to the VMCPU.
6434 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6435 * out-of-sync. Make sure to update the required fields
6436 * before using them.
6437 *
6438 * @remarks No-long-jump zone!!!
6439 */
6440static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6441{
6442 /* Guest segment registers. */
6443 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6444 {
6445 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
6446 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
6447 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
6448 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
6449 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
6450 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
6451 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
6452
6453 /* Restore segment attributes for real-on-v86 mode hack. */
6454 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6455 {
6456 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6457 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6458 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6459 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6460 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6461 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6462 }
6463 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6464 }
6465
6466 return VINF_SUCCESS;
6467}
6468
6469
6470/**
6471 * Saves the guest descriptor table registers and task register from the current
6472 * VMCS into the guest-CPU context.
6473 *
6474 * @returns VBox status code.
6475 * @param pVCpu Pointer to the VMCPU.
6476 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6477 * out-of-sync. Make sure to update the required fields
6478 * before using them.
6479 *
6480 * @remarks No-long-jump zone!!!
6481 */
6482static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6483{
6484 int rc = VINF_SUCCESS;
6485
6486 /* Guest LDTR. */
6487 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6488 {
6489 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
6490 AssertRCReturn(rc, rc);
6491 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6492 }
6493
6494 /* Guest GDTR. */
6495 uint64_t u64Val = 0;
6496 uint32_t u32Val = 0;
6497 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6498 {
6499 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6500 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6501 pMixedCtx->gdtr.pGdt = u64Val;
6502 pMixedCtx->gdtr.cbGdt = u32Val;
6503 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6504 }
6505
6506 /* Guest IDTR. */
6507 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6508 {
6509 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6510 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6511 pMixedCtx->idtr.pIdt = u64Val;
6512 pMixedCtx->idtr.cbIdt = u32Val;
6513 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6514 }
6515
6516 /* Guest TR. */
6517 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6518 {
6519 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6520 AssertRCReturn(rc, rc);
6521
6522 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, so don't save the fake one. */
6523 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6524 {
6525 rc = VMXLOCAL_READ_SEG(TR, tr);
6526 AssertRCReturn(rc, rc);
6527 }
6528 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6529 }
6530 return rc;
6531}
6532
6533#undef VMXLOCAL_READ_SEG
6534
6535
6536/**
6537 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
6538 * context.
6539 *
6540 * @returns VBox status code.
6541 * @param pVCpu Pointer to the VMCPU.
6542 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6543 * out-of-sync. Make sure to update the required fields
6544 * before using them.
6545 *
6546 * @remarks No-long-jump zone!!!
6547 */
6548static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6549{
6550 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6551 {
6552 if (!pVCpu->hm.s.fUsingHyperDR7)
6553 {
6554 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6555 uint32_t u32Val;
6556 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
6557 pMixedCtx->dr[7] = u32Val;
6558 }
6559
6560 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6561 }
6562 return VINF_SUCCESS;
6563}
6564
6565
6566/**
6567 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
6568 *
6569 * @returns VBox status code.
6570 * @param pVCpu Pointer to the VMCPU.
6571 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6572 * out-of-sync. Make sure to update the required fields
6573 * before using them.
6574 *
6575 * @remarks No-long-jump zone!!!
6576 */
6577static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6578{
6579 NOREF(pMixedCtx);
6580
6581 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
6582 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6583 return VINF_SUCCESS;
6584}
6585
6586
6587/**
6588 * Saves the entire guest state from the currently active VMCS into the
6589 * guest-CPU context. This essentially VMREADs all guest-data.
6590 *
6591 * @returns VBox status code.
6592 * @param pVCpu Pointer to the VMCPU.
6593 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6594 * out-of-sync. Make sure to update the required fields
6595 * before using them.
6596 */
6597static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6598{
6599 Assert(pVCpu);
6600 Assert(pMixedCtx);
6601
6602 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6603 return VINF_SUCCESS;
6604
6605 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
6606 again on the ring-3 callback path, there is no real need to do so. */
6607 if (VMMRZCallRing3IsEnabled(pVCpu))
6608 VMMR0LogFlushDisable(pVCpu);
6609 else
6610 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6611 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6612
6613 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6614 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6615
6616 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6617 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6618
6619 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6620 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6621
6622 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6623 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6624
6625 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6626 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6627
6628 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6629 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6630
6631 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6632 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6633
6634 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6635 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6636
6637 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6638 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6639
6640 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6641 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6642
6643 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6644 ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
6645
6646 if (VMMRZCallRing3IsEnabled(pVCpu))
6647 VMMR0LogFlushEnable(pVCpu);
6648
6649 return rc;
6650}
6651
6652
6653/**
6654 * Checks per-VM and per-VCPU force-flag actions that require us to go back to
6655 * ring-3 for one reason or another.
6656 *
6657 * @returns VBox status code (information status code included).
6658 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6659 * ring-3.
6660 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6661 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6662 * interrupts)
6663 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6664 * all EMTs to be in ring-3.
6665 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6666 * @retval VINF_EM_NO_MEMORY if PGM is out of memory and we need to return
6667 * to the EM loop.
6668 *
6669 * @param pVM Pointer to the VM.
6670 * @param pVCpu Pointer to the VMCPU.
6671 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6672 * out-of-sync. Make sure to update the required fields
6673 * before using them.
6674 */
6675static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6676{
6677 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6678
6679 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
6680 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
6681 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
6682 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6683 {
6684 /* We need the control registers now, make sure the guest-CPU context is updated. */
6685 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6686 AssertRCReturn(rc3, rc3);
6687
6688 /* Pending HM CR3 sync. */
6689 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6690 {
6691 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6692 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
6693 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
6694 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6695 }
6696
6697 /* Pending HM PAE PDPEs. */
6698 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6699 {
6700 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6701 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6702 }
6703
6704 /* Pending PGM CR3 sync. */
6705 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6706 {
6707 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6708 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6709 if (rc2 != VINF_SUCCESS)
6710 {
6711 AssertRC(rc2);
6712 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
6713 return rc2;
6714 }
6715 }
6716
6717 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6718 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6719 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6720 {
6721 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6722 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6723 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6724 return rc2;
6725 }
6726
6727 /* Pending VM request packets, such as hardware interrupts. */
6728 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6729 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6730 {
6731 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
6732 return VINF_EM_PENDING_REQUEST;
6733 }
6734
6735 /* Pending PGM pool flushes. */
6736 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6737 {
6738 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
6739 return VINF_PGM_POOL_FLUSH_PENDING;
6740 }
6741
6742 /* Pending DMA requests. */
6743 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6744 {
6745 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
6746 return VINF_EM_RAW_TO_R3;
6747 }
6748 }
6749
6750 return VINF_SUCCESS;
6751}
6752
6753
6754/**
6755 * Converts any TRPM trap into a pending HM event. This is typically used when
6756 * entering from ring-3 (not longjmp returns).
6757 *
6758 * @param pVCpu Pointer to the VMCPU.
6759 */
6760static int hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6761{
6762 Assert(TRPMHasTrap(pVCpu));
6763 Assert(!pVCpu->hm.s.Event.fPending);
6764
6765 uint8_t uVector;
6766 TRPMEVENT enmTrpmEvent;
6767 RTGCUINT uErrCode;
6768 RTGCUINTPTR GCPtrFaultAddress;
6769 uint8_t cbInstr;
6770
6771 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6772 AssertRC(rc);
6773
6774 /* See Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6775 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6776 if (enmTrpmEvent == TRPM_TRAP)
6777 {
6778 switch (uVector)
6779 {
6780 case X86_XCPT_BP:
6781 case X86_XCPT_OF:
6782 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6783 break;
6784
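 /* These exceptions push an error code on the stack; flag it so VM-entry injects the error code too. */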
6785 case X86_XCPT_PF:
6786 case X86_XCPT_DF:
6787 case X86_XCPT_TS:
6788 case X86_XCPT_NP:
6789 case X86_XCPT_SS:
6790 case X86_XCPT_GP:
6791 case X86_XCPT_AC:
6792 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6793 /* no break! */
6794 default:
6795 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6796 break;
6797 }
6798 }
6799 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6800 {
6801 if (uVector == X86_XCPT_NMI)
6802 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6803 else
6804 {
6805 uint32_t uEFlags = CPUMGetGuestEFlags(pVCpu);
6806 if (!(uEFlags & X86_EFL_IF))
6807 return VERR_VMX_IPE_5;
6808 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6809 }
6810 }
6811 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6812 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6813 else
6814 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6815
6816 rc = TRPMResetTrap(pVCpu);
6817 AssertRC(rc);
6818 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6819 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6820
6821 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6822 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
6823 return VINF_SUCCESS;
6824}
6825
6826
6827/**
6828 * Converts any pending HM event into a TRPM trap. Typically used when leaving
6829 * VT-x to execute any instruction.
6830 *
6831 * @param pVCpu Pointer to the VMCPU.
6832 */
6833static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6834{
6835 Assert(pVCpu->hm.s.Event.fPending);
6836
6837 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
6838 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
6839 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
6840 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6841
6842 /* If a trap was already pending, we did something wrong! */
6843 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6844
6845 TRPMEVENT enmTrapType;
6846 switch (uVectorType)
6847 {
6848 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6849 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6850 enmTrapType = TRPM_HARDWARE_INT;
6851 break;
6852
6853 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6854 enmTrapType = TRPM_SOFTWARE_INT;
6855 break;
6856
6857 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6858 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6859 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6860 enmTrapType = TRPM_TRAP;
6861 break;
6862
6863 default:
6864 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6865 enmTrapType = TRPM_32BIT_HACK;
6866 break;
6867 }
6868
6869 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6870
6871 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6872 AssertRC(rc);
6873
6874 if (fErrorCodeValid)
6875 TRPMSetErrorCode(pVCpu, uErrorCode);
6876
6877 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6878 && uVector == X86_XCPT_PF)
6879 {
6880 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6881 }
6882 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6883 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6884 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6885 {
6886 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6887 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6888 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6889 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6890 }
6891 pVCpu->hm.s.Event.fPending = false;
6892}
6893
6894
6895/**
6896 * Does the necessary state syncing before returning to ring-3 for any reason
6897 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6898 *
6899 * @returns VBox status code.
6900 * @param pVM Pointer to the VM.
6901 * @param pVCpu Pointer to the VMCPU.
6902 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6903 * be out-of-sync. Make sure to update the required
6904 * fields before using them.
6905 * @param fSaveGuestState Whether to save the guest state or not.
6906 *
6907 * @remarks No-long-jmp zone!!!
6908 */
6909static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
6910{
6911 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6912 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6913
6914 RTCPUID idCpu = RTMpCpuId();
6915 Log4Func(("HostCpuId=%u\n", idCpu));
6916
6917 /*
6918 * !!! IMPORTANT !!!
6919 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
6920 */
6921
6922 /* Save the guest state if necessary. */
6923 if ( fSaveGuestState
6924 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
6925 {
6926 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6927 AssertRCReturn(rc, rc);
6928 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
6929 }
6930
6931 /* Restore host FPU state if necessary and resync on next R0 reentry. */
6932 if (CPUMIsGuestFPUStateActive(pVCpu))
6933 {
6934 /* We shouldn't reload CR0 without saving it first. */
6935 if (!fSaveGuestState)
6936 {
6937 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6938 AssertRCReturn(rc, rc);
6939 }
6940 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
6941 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6942 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
6943 }
6944
6945 /* Restore host debug registers if necessary and resync on next R0 reentry. */
6946#ifdef VBOX_STRICT
6947 if (CPUMIsHyperDebugStateActive(pVCpu))
6948 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6949#endif
6950 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
6951 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
6952 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
6953 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
6954
6955#if HC_ARCH_BITS == 64
6956 /* Restore host-state bits that VT-x only restores partially. */
6957 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
6958 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
6959 {
6960 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
6961 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6962 }
6963 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6964#endif
6965
6966#if HC_ARCH_BITS == 64
6967 /* Restore the host MSRs as we're leaving VT-x context. */
6968 if ( pVM->hm.s.fAllow64BitGuests
6969 && pVCpu->hm.s.vmx.fRestoreHostMsrs)
6970 {
6971 /* We shouldn't reload the guest MSRs without saving them first. */
6972 if (!fSaveGuestState)
6973 {
6974 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6975 AssertRCReturn(rc, rc);
6976 }
6977 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
6978 hmR0VmxLazyRestoreHostMsrs(pVCpu);
6979 Assert(!pVCpu->hm.s.vmx.fRestoreHostMsrs);
6980 }
6981#endif
6982
6983 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
6984 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
6985
6986 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
6987 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
6988 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
6989 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
6990 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
6991 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
6992 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
6993 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6994
6995 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
6996
6997 /** @todo This partially defeats the purpose of having preemption hooks.
6998 * The problem is, deregistering the hooks should be moved to a place that
6999 * lasts until the EMT is about to be destroyed, not every time we leave HM
7000 * context.
7001 */
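 /* VMCLEAR the active VMCS so its data is flushed to memory and it can later be loaded on another host CPU. */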
7002 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7003 {
7004 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7005 AssertRCReturn(rc, rc);
7006
7007 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7008 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7009 }
7010 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7011 NOREF(idCpu);
7012
7013 return VINF_SUCCESS;
7014}
7015
7016
7017/**
7018 * Leaves the VT-x session.
7019 *
7020 * @returns VBox status code.
7021 * @param pVM Pointer to the VM.
7022 * @param pVCpu Pointer to the VMCPU.
7023 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7024 * out-of-sync. Make sure to update the required fields
7025 * before using them.
7026 *
7027 * @remarks No-long-jmp zone!!!
7028 */
7029DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7030{
7031 HM_DISABLE_PREEMPT_IF_NEEDED();
7032 HMVMX_ASSERT_CPU_SAFE();
7033 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7034 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7035
7036 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
7037 and done this from the VMXR0ThreadCtxCallback(). */
7038 if (!pVCpu->hm.s.fLeaveDone)
7039 {
7040 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7041 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT_IF_NEEDED(), rc2);
7042 pVCpu->hm.s.fLeaveDone = true;
7043 }
7044
7045 /*
7046 * !!! IMPORTANT !!!
7047 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7048 */
7049
7050 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7051 /** @todo This is bad. Deregistering here means we need to VMCLEAR always
7052 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
7053 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7054 VMMR0ThreadCtxHooksDeregister(pVCpu);
7055
7056 /* Leave HM context. This takes care of local init (term). */
7057 int rc = HMR0LeaveCpu(pVCpu);
7058
7059 HM_RESTORE_PREEMPT_IF_NEEDED();
7060
7061 return rc;
7062}
7063
7064
7065/**
7066 * Does the necessary state syncing before doing a longjmp to ring-3.
7067 *
7068 * @returns VBox status code.
7069 * @param pVM Pointer to the VM.
7070 * @param pVCpu Pointer to the VMCPU.
7071 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7072 * out-of-sync. Make sure to update the required fields
7073 * before using them.
7074 *
7075 * @remarks No-long-jmp zone!!!
7076 */
7077DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7078{
7079 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7080}
7081
7082
7083/**
7084 * Takes the necessary actions before going back to ring-3.
7085 *
7086 * An action requires us to go back to ring-3. This function does the necessary
7087 * steps before we can safely return to ring-3. This is not the same as longjmps
7088 * to ring-3; this is voluntary and prepares the guest so it may continue
7089 * executing outside HM (recompiler/IEM).
7090 *
7091 * @returns VBox status code.
7092 * @param pVM Pointer to the VM.
7093 * @param pVCpu Pointer to the VMCPU.
7094 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7095 * out-of-sync. Make sure to update the required fields
7096 * before using them.
7097 * @param rcExit The reason for exiting to ring-3. Can be
7098 * VINF_VMM_UNKNOWN_RING3_CALL.
7099 */
7100static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
7101{
7102 Assert(pVM);
7103 Assert(pVCpu);
7104 Assert(pMixedCtx);
7105 HMVMX_ASSERT_PREEMPT_SAFE();
7106
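 /* If the VMCS pointer turned out to be invalid, capture diagnostics (current VMCS address, revision,
 CPU we entered on) for the ring-3 error reporting. */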
7107 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7108 {
7109 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7110 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7111 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7112 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7113 }
7114
7115 /* Please, no longjmps here (any logging could flush and longjmp back to ring-3). NO LOGGING BEFORE THIS POINT! */
7116 VMMRZCallRing3Disable(pVCpu);
7117 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
7118
7119 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
7120 if (pVCpu->hm.s.Event.fPending)
7121 {
7122 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7123 Assert(!pVCpu->hm.s.Event.fPending);
7124 }
7125
7126 /* Save guest state and restore host state bits. */
7127 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7128 AssertRCReturn(rc, rc);
7129 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7130
7131 /* Sync recompiler state. */
7132 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7133 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7134 | CPUM_CHANGED_LDTR
7135 | CPUM_CHANGED_GDTR
7136 | CPUM_CHANGED_IDTR
7137 | CPUM_CHANGED_TR
7138 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7139 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7140 if ( pVM->hm.s.fNestedPaging
7141 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7142 {
7143 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7144 }
7145
7146 Assert(!pVCpu->hm.s.fClearTrapFlag);
7147
7148 /* On our way back from ring-3, reload the guest state if there is a possibility of it having changed. */
7149 if (rcExit != VINF_EM_RAW_INTERRUPT)
7150 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7151
7152 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7153
7154 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7155 VMMRZCallRing3RemoveNotification(pVCpu);
7156 VMMRZCallRing3Enable(pVCpu);
7157
7158 return rc;
7159}
7160
7161
7162/**
7163 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7164 * longjump to ring-3 and possibly get preempted.
7165 *
7166 * @returns VBox status code.
7167 * @param pVCpu Pointer to the VMCPU.
7168 * @param enmOperation The operation causing the ring-3 longjump.
7169 * @param pvUser Opaque pointer to the guest-CPU context. The data
7170 * may be out-of-sync. Make sure to update the required
7171 * fields before using them.
7172 */
7173DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7174{
7175 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7176 {
7177 /*
7178 * !!! IMPORTANT !!!
7179 * If you modify code here, make sure to check whether hmR0VmxLeave() and hmR0VmxLeaveSession() need
7180 * to be updated too. This is a stripped-down version which gets out ASAP, trying not to trigger any assertions.
7181 */
7182 VMMRZCallRing3RemoveNotification(pVCpu);
7183 VMMRZCallRing3Disable(pVCpu);
7184 HM_DISABLE_PREEMPT_IF_NEEDED();
7185
7186 PVM pVM = pVCpu->CTX_SUFF(pVM);
7187 if (CPUMIsGuestFPUStateActive(pVCpu))
7188 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7189
7190 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7191
7192#if HC_ARCH_BITS == 64
7193 /* Restore host-state bits that VT-x only restores partially. */
7194 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7195 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7196 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7197 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7198
7199 /* Restore the host MSRs as we're leaving VT-x context. */
7200 if ( pVM->hm.s.fAllow64BitGuests
7201 && pVCpu->hm.s.vmx.fRestoreHostMsrs)
7202 {
7203 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7204 }
7205#endif
7206 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7207 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7208 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7209 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7210 {
7211 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7212 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7213 }
7214
7215 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7216 VMMR0ThreadCtxHooksDeregister(pVCpu);
7217
7218 HMR0LeaveCpu(pVCpu);
7219 HM_RESTORE_PREEMPT_IF_NEEDED();
7220 return VINF_SUCCESS;
7221 }
7222
7223 Assert(pVCpu);
7224 Assert(pvUser);
7225 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7226 HMVMX_ASSERT_PREEMPT_SAFE();
7227
7228 VMMRZCallRing3Disable(pVCpu);
7229 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7230
7231 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
7232 enmOperation));
7233
7234 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7235 AssertRCReturn(rc, rc);
7236
7237 VMMRZCallRing3Enable(pVCpu);
7238 return VINF_SUCCESS;
7239}
7240
7241
7242/**
7243 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7244 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7245 *
7246 * @param pVCpu Pointer to the VMCPU.
7247 */
7248DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7249{
7250 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7251 {
7252 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7253 {
7254 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7255 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7256 AssertRC(rc);
7257 Log4(("Setup interrupt-window exiting\n"));
7258 }
7259 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7260}
7261
7262
7263/**
7264 * Clears the interrupt-window exiting control in the VMCS.
7265 *
7266 * @param pVCpu Pointer to the VMCPU.
7267 */
7268DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7269{
7270 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7271 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7272 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7273 AssertRC(rc);
7274 Log4(("Cleared interrupt-window exiting\n"));
7275}
7276
7277
7278/**
7279 * Evaluates the event to be delivered to the guest and sets it as the pending
7280 * event.
7281 *
7282 * @param pVCpu Pointer to the VMCPU.
7283 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7284 * out-of-sync. Make sure to update the required fields
7285 * before using them.
7286 */
7287static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7288{
7289 Assert(!pVCpu->hm.s.Event.fPending);
7290
7291 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7292 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7293 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7294 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7295
7296 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7297 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
7298 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
7299 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7300 Assert(!TRPMHasTrap(pVCpu));
7301
7302 /** @todo SMI. SMIs take priority over NMIs. */
7303 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7304 {
7305 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7306 if ( !fBlockMovSS
7307 && !fBlockSti)
7308 {
7310 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
7311 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7312 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7313
7314 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7315 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7316 }
7317 else
7318 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7319 }
7320 /*
7321 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
7322 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt.
7323 */
7324 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7325 && !pVCpu->hm.s.fSingleInstruction)
7326 {
7327 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7328 AssertRC(rc);
7329 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7330 if ( !fBlockInt
7331 && !fBlockSti
7332 && !fBlockMovSS)
7333 {
7334 uint8_t u8Interrupt;
7335 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7336 if (RT_SUCCESS(rc))
7337 {
7338 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7339 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
7340 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7341
7342 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7343 }
7344 else
7345 {
7346 /** @todo Does this actually happen? If not turn it into an assertion. */
7347 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7348 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7349 }
7350 }
7351 else
7352 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7353 }
7354}
7355
7356
7357/**
7358 * Sets a pending-debug exception to be delivered to the guest if the guest is
7359 * single-stepping.
7360 *
7361 * @param pVCpu Pointer to the VMCPU.
7362 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7363 * out-of-sync. Make sure to update the required fields
7364 * before using them.
7365 */
7366DECLINLINE(void) hmR0VmxSetPendingDebugXcpt(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7367{
7368 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7369 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
7370 {
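 /* Set the BS (single-step) bit in the guest's pending debug exceptions field so that a #DB is delivered to the
    guest after VM-entry, mirroring what EFLAGS.TF would cause on real hardware. */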
7371 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7372 AssertRC(rc);
7373 }
7374}
7375
7376
7377/**
7378 * Injects any pending events into the guest if the guest is in a state to
7379 * receive them.
7380 *
7381 * @returns VBox status code (informational status codes included).
7382 * @param pVCpu Pointer to the VMCPU.
7383 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7384 * out-of-sync. Make sure to update the required fields
7385 * before using them.
7386 */
7387static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7388{
7389 HMVMX_ASSERT_PREEMPT_SAFE();
7390 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7391
7392 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7393 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7394 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7395 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7396
7397 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7398 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet. */
7399 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
7400 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7401 Assert(!TRPMHasTrap(pVCpu));
7402
7403 int rc = VINF_SUCCESS;
7404 if (pVCpu->hm.s.Event.fPending)
7405 {
7406 /*
7407 * Clear any interrupt-window exiting control if we're going to inject an interrupt. Saves one extra
7408 * VM-exit in situations where we previously setup interrupt-window exiting but got other VM-exits and
7409 * ended up enabling interrupts outside VT-x.
7410 */
7411 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7412 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7413 && ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT
7414 || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI))
7415 {
7416 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7417 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7418 }
7419#if 1 /* defined(VBOX_STRICT) */ /* Temporarily for debugging. */
7420 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7421 {
7422 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7423 if (fBlockInt)
7424 return VERR_VMX_IPE_4;
7425 Assert(!fBlockSti);
7426 Assert(!fBlockMovSS);
7427 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT));
7428 }
7429 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7430 {
7431 Assert(!fBlockSti);
7432 Assert(!fBlockMovSS);
7433 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT));
7434 }
7435#endif
7436 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7437 (uint8_t)uIntType));
7438 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7439 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
7440 AssertRCReturn(rc, rc);
7441
7442 /* Update the interruptibility-state as it could have been changed by
7443 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7444 fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7445 fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7446
7447#ifdef VBOX_WITH_STATISTICS
7448 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7449 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7450 else
7451 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7452#endif
7453 }
7454
7455 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7456 if ( fBlockSti
7457 || fBlockMovSS)
7458 {
7459 if ( !pVCpu->hm.s.fSingleInstruction
7460 && !DBGFIsStepping(pVCpu))
7461 {
7462 /*
7463 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7464 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7465 * See Intel spec. 27.3.4 "Saving Non-Register State".
7466 */
7467 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7468 AssertRCReturn(rc2, rc2);
7469 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
7470 }
7471 else if (pMixedCtx->eflags.Bits.u1TF)
7472 {
7473 /*
7474 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7475 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7476 */
7477 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7478 uIntrState = 0;
7479 }
7480 }
7481
7482 /*
7483 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7484 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7485 */
7486 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7487 AssertRC(rc2);
7488
7489 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
7490 NOREF(fBlockMovSS); NOREF(fBlockSti);
7491 return rc;
7492}
7493
7494
7495/**
7496 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
7497 *
7498 * @param pVCpu Pointer to the VMCPU.
7499 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7500 * out-of-sync. Make sure to update the required fields
7501 * before using them.
7502 */
7503DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7504{
7505 NOREF(pMixedCtx);
7506 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7507 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7508}
7509
7510
7511/**
7512 * Injects a double-fault (#DF) exception into the VM.
7513 *
7514 * @returns VBox status code (informational status code included).
7515 * @param pVCpu Pointer to the VMCPU.
7516 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7517 * out-of-sync. Make sure to update the required fields
7518 * before using them.
7519 */
7520DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
7521{
7522 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7523 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7524 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7525 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7526 puIntrState);
7527}
7528
7529
7530/**
7531 * Sets a debug (#DB) exception as pending-for-injection into the VM.
7532 *
7533 * @param pVCpu Pointer to the VMCPU.
7534 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7535 * out-of-sync. Make sure to update the required fields
7536 * before using them.
7537 */
7538DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7539{
7540 NOREF(pMixedCtx);
7541 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7542 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7543 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7544}
7545
7546
7547/**
7548 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
7549 *
7550 * @param pVCpu Pointer to the VMCPU.
7551 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7552 * out-of-sync. Make sure to update the required fields
7553 * before using them.
7554 * @param cbInstr The value of RIP that is to be pushed on the guest
7555 * stack.
7556 */
7557DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7558{
7559 NOREF(pMixedCtx);
7560 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7561 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7562 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7563}
7564
7565
7566/**
7567 * Injects a general-protection (#GP) fault into the VM.
7568 *
7569 * @returns VBox status code (informational status code included).
7570 * @param pVCpu Pointer to the VMCPU.
7571 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7572 * out-of-sync. Make sure to update the required fields
7573 * before using them.
7574 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7575 * mode, i.e. in real-mode it's not valid).
7576 * @param u32ErrorCode The error code associated with the #GP.
7577 */
7578DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7579 uint32_t *puIntrState)
7580{
7581 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7582 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7583 if (fErrorCodeValid)
7584 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7585 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7586 puIntrState);
7587}
7588
7589
7590/**
7591 * Sets a general-protection (#GP) exception as pending-for-injection into the
7592 * VM.
7593 *
7594 * @param pVCpu Pointer to the VMCPU.
7595 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7596 * out-of-sync. Make sure to update the required fields
7597 * before using them.
7598 * @param u32ErrorCode The error code associated with the #GP.
7599 */
7600DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7601{
7602 NOREF(pMixedCtx);
7603 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7604 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7605 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7606 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7607}
7608
7609
7610/**
7611 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7612 *
7613 * @param pVCpu Pointer to the VMCPU.
7614 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7615 * out-of-sync. Make sure to update the required fields
7616 * before using them.
7617 * @param uVector The software interrupt vector number.
7618 * @param cbInstr The value of RIP that is to be pushed on the guest
7619 * stack.
7620 */
7621DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7622{
7623 NOREF(pMixedCtx);
7624 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
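 /* INT3 (#BP) and INTO (#OF) must be injected as software exceptions; every other INT n is a software interrupt.
    See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". */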
7625 if ( uVector == X86_XCPT_BP
7626 || uVector == X86_XCPT_OF)
7627 {
7628 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7629 }
7630 else
7631 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7632 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7633}
7634
7635
7636/**
7637 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7638 * stack.
7639 *
7640 * @returns VBox status code (informational status code included).
7641 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7642 * @param pVM Pointer to the VM.
7643 * @param pMixedCtx Pointer to the guest-CPU context.
7644 * @param uValue The value to push to the guest stack.
7645 */
7646DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7647{
7648 /*
7649 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7650 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7651 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7652 */
7653 if (pMixedCtx->sp == 1)
7654 return VINF_EM_RESET;
7655 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7656 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7657 AssertRCReturn(rc, rc);
7658 return rc;
7659}
7660
7661
7662/**
7663 * Injects an event into the guest upon VM-entry by updating the relevant fields
7664 * in the VM-entry area in the VMCS.
7665 *
7666 * @returns VBox status code (informational error codes included).
7667 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7668 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7669 *
7670 * @param pVCpu Pointer to the VMCPU.
7671 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7672 * be out-of-sync. Make sure to update the required
7673 * fields before using them.
7674 * @param u64IntInfo The VM-entry interruption-information field.
7675 * @param cbInstr The VM-entry instruction length in bytes (for
7676 * software interrupts, exceptions and privileged
7677 * software exceptions).
7678 * @param u32ErrCode The VM-entry exception error code.
7679 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
7680 * @param puIntrState Pointer to the current guest interruptibility-state.
7681 * This interruptibility-state will be updated if
7682 * necessary. This cannot be NULL.
7683 *
7684 * @remarks Requires CR0!
7685 * @remarks No-long-jump zone!!!
7686 */
7687static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7688 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
7689{
7690 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7691 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
7692 Assert(puIntrState);
7693 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7694
7695 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7696 const uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7697
7698#ifdef VBOX_STRICT
7699 /* Validate the error-code-valid bit for hardware exceptions. */
7700 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
7701 {
7702 switch (uVector)
7703 {
7704 case X86_XCPT_PF:
7705 case X86_XCPT_DF:
7706 case X86_XCPT_TS:
7707 case X86_XCPT_NP:
7708 case X86_XCPT_SS:
7709 case X86_XCPT_GP:
7710 case X86_XCPT_AC:
7711 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7712 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7713 /* fallthru */
7714 default:
7715 break;
7716 }
7717 }
7718#endif
7719
7720 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7721 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7722 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7723
7724 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7725
7726 /* We require CR0 to check if the guest is in real-mode. */
7727 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7728 AssertRCReturn(rc, rc);
7729
7730 /*
7731 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
7732 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
7733 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
7734 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7735 */
7736 if (CPUMIsGuestInRealModeEx(pMixedCtx))
7737 {
7738 PVM pVM = pVCpu->CTX_SUFF(pVM);
7739 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
7740 {
7741 Assert(PDMVmmDevHeapIsEnabled(pVM));
7742 Assert(pVM->hm.s.vmx.pRealModeTSS);
7743
7744 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
7745 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7746 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7747 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7748 AssertRCReturn(rc, rc);
7749 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
7750
7751 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7752 const size_t cbIdtEntry = sizeof(X86IDTR16);
7753 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7754 {
7755 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7756 if (uVector == X86_XCPT_DF)
7757 return VINF_EM_RESET;
7758 else if (uVector == X86_XCPT_GP)
7759 {
7760 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7761 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
7762 }
7763
7764 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
7765 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
7766 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
7767 }
7768
7769 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7770 uint16_t uGuestIp = pMixedCtx->ip;
7771 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
7772 {
7773 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7774 /* #BP and #OF are both benign traps; we must resume at the next instruction. */
7775 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7776 }
7777 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
7778 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7779
7780 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7781 X86IDTR16 IdtEntry;
7782 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
7783 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7784 AssertRCReturn(rc, rc);
7785
7786 /* Construct the stack frame for the interrupt/exception handler. */
7787 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
7788 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
7789 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
7790 AssertRCReturn(rc, rc);
7791
7792 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7793 if (rc == VINF_SUCCESS)
7794 {
7795 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7796 pMixedCtx->rip = IdtEntry.offSel;
7797 pMixedCtx->cs.Sel = IdtEntry.uSel;
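 /* Real-mode segment base = selector * 16. cbIdtEntry is sizeof(X86IDTR16) == 4, so shifting by it is
    equivalent to the conventional << 4. */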
7798 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
7799 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7800 && uVector == X86_XCPT_PF)
7801 {
7802 pMixedCtx->cr2 = GCPtrFaultAddress;
7803 }
7804
7805 /* If any other guest-state bits are changed here, make sure to update
7806 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7807 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
7808 | HM_CHANGED_GUEST_RIP
7809 | HM_CHANGED_GUEST_RFLAGS
7810 | HM_CHANGED_GUEST_RSP);
7811
7812 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7813 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7814 {
7815 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7816 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7817 Log4(("Clearing inhibition due to STI.\n"));
7818 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
7819 }
7820 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntInfo, u32ErrCode, cbInstr));
7821
7822 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
7823 it if we return to ring-3 before executing guest code. */
7824 pVCpu->hm.s.Event.fPending = false;
7825 }
7826 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
7827 return rc;
7828 }
7829 else
7830 {
7831 /*
7832 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
7833 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7834 */
7835 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7836 }
7837 }
7838
7839 /* Validate. */
7840 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
7841 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntInfo)); /* Bit 12 MBZ. */
7842 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
7843
7844 /* Inject. */
7845 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
7846 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
7847 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
7848 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
7849
7850 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7851 && uVector == X86_XCPT_PF)
7852 {
7853 pMixedCtx->cr2 = GCPtrFaultAddress;
7854 }
7855
7856 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
7857 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
7858
7859 AssertRCReturn(rc, rc);
7860 return rc;
7861}
7862
7863
7864/**
7865 * Clears the interrupt-window exiting control in the VMCS and, if necessary,
7866 * clears the current event in the VMCS as well.
7867 *
7868 * @returns VBox status code.
7869 * @param pVCpu Pointer to the VMCPU.
7870 *
7871 * @remarks Use this function only to clear events that have not yet been
7872 * delivered to the guest but have already been injected into the VMCS!
7873 * @remarks No-long-jump zone!!!
7874 */
7875static void hmR0VmxClearEventVmcs(PVMCPU pVCpu)
7876{
7877 int rc;
7878 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
7879
7880 /* Clear interrupt-window exiting control. */
7881 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7882 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7883
7884 if (!pVCpu->hm.s.Event.fPending)
7885 return;
7886
7887#ifdef VBOX_STRICT
7888 uint32_t u32EntryInfo;
7889 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
7890 AssertRC(rc);
7891 Assert(VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo));
7892#endif
7893
7894 /* Clear the entry-interruption field (including the valid bit). */
7895 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
7896 AssertRC(rc);
7897
7898 /* Clear the pending debug exception field. */
7899 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
7900 AssertRC(rc);
7901
7902 /* We deliberately don't clear "hm.s.Event.fPending" here, it's taken
7903 care of in hmR0VmxExitToRing3() converting the pending event to TRPM. */
7904}
7905
7906
7907/**
7908 * Enters the VT-x session.
7909 *
7910 * @returns VBox status code.
7911 * @param pVM Pointer to the VM.
7912 * @param pVCpu Pointer to the VMCPU.
7913 * @param pCpu Pointer to the CPU info struct.
7914 */
7915VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
7916{
7917 AssertPtr(pVM);
7918 AssertPtr(pVCpu);
7919 Assert(pVM->hm.s.vmx.fSupported);
7920 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7921 NOREF(pCpu); NOREF(pVM);
7922
7923 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
7924 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
7925
7926#ifdef VBOX_STRICT
7927 /* Make sure we're in VMX root mode. */
7928 RTCCUINTREG u32HostCR4 = ASMGetCR4();
7929 if (!(u32HostCR4 & X86_CR4_VMXE))
7930 {
7931 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
7932 return VERR_VMX_X86_CR4_VMXE_CLEARED;
7933 }
7934#endif
7935
7936 /*
7937 * Load the VCPU's VMCS as the current (and active) one.
7938 */
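 /* VMXActivateVmcs performs a VMPTRLD; the VMCS must be in the "clear" state (i.e. VMCLEAR'd) before it can be
    made current and active on this CPU. */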
7939 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
7940 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7941 if (RT_FAILURE(rc))
7942 return rc;
7943
7944 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
7945 pVCpu->hm.s.fLeaveDone = false;
7946 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
7947
7948 return VINF_SUCCESS;
7949}
7950
7951
7952/**
7953 * The thread-context callback (only on platforms which support it).
7954 *
7955 * @param enmEvent The thread-context event.
7956 * @param pVCpu Pointer to the VMCPU.
7957 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
7958 * @thread EMT(pVCpu)
7959 */
7960VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
7961{
7962 NOREF(fGlobalInit);
7963
7964 switch (enmEvent)
7965 {
7966 case RTTHREADCTXEVENT_PREEMPTING:
7967 {
7968 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7969 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
7970 VMCPU_ASSERT_EMT(pVCpu);
7971
7972 PVM pVM = pVCpu->CTX_SUFF(pVM);
7973 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
7974
7975 /* No longjmps (logger flushes, locks) in this fragile context. */
7976 VMMRZCallRing3Disable(pVCpu);
7977 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
7978
7979 /*
7980 * Restore host-state (FPU, debug etc.)
7981 */
7982 if (!pVCpu->hm.s.fLeaveDone)
7983 {
7984 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are
7985 holding the PGM lock while saving the guest state (see hmR0VmxSaveGuestControlRegs()). */
7986 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
7987 pVCpu->hm.s.fLeaveDone = true;
7988 }
7989
7990 /* Leave HM context, takes care of local init (term). */
7991 int rc = HMR0LeaveCpu(pVCpu);
7992 AssertRC(rc); NOREF(rc);
7993
7994 /* Restore longjmp state. */
7995 VMMRZCallRing3Enable(pVCpu);
7996 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptPreempting);
7997 break;
7998 }
7999
8000 case RTTHREADCTXEVENT_RESUMED:
8001 {
8002 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8003 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8004 VMCPU_ASSERT_EMT(pVCpu);
8005
8006 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8007 VMMRZCallRing3Disable(pVCpu);
8008 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8009
8010 /* Initialize the bare minimum state required for HM. This takes care of
8011 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8012 int rc = HMR0EnterCpu(pVCpu);
8013 AssertRC(rc);
8014 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8015
8016 /* Load the active VMCS as the current one. */
8017 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8018 {
8019 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8020 AssertRC(rc); NOREF(rc);
8021 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8022 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8023 }
8024 pVCpu->hm.s.fLeaveDone = false;
8025
8026 /* Restore longjmp state. */
8027 VMMRZCallRing3Enable(pVCpu);
8028 break;
8029 }
8030
8031 default:
8032 break;
8033 }
8034}
8035
8036
8037/**
8038 * Saves the host state in the VMCS host-state.
8039 * Sets up the VM-exit MSR-load area.
8040 *
8041 * The CPU state will be loaded from these fields on every successful VM-exit.
8042 *
8043 * @returns VBox status code.
8044 * @param pVM Pointer to the VM.
8045 * @param pVCpu Pointer to the VMCPU.
8046 *
8047 * @remarks No-long-jump zone!!!
8048 */
8049static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8050{
8051 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8052
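 /* Nothing to do unless something has marked the host context dirty (e.g. we were preempted and resumed, possibly on
    another host CPU); otherwise the host-state fields already in the VMCS are still valid. */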
8053 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8054 return VINF_SUCCESS;
8055
8056 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8057 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8058
8059 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8060 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8061
8062 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8063 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8064
8065 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8066 return rc;
8067}
8068
8069
8070/**
8071 * Saves the host state in the VMCS host-state.
8072 *
8073 * @returns VBox status code.
8074 * @param pVM Pointer to the VM.
8075 * @param pVCpu Pointer to the VMCPU.
8076 *
8077 * @remarks No-long-jump zone!!!
8078 */
8079VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8080{
8081 AssertPtr(pVM);
8082 AssertPtr(pVCpu);
8083
8084 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8085
8086 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
8087 and have to resave the host state, but most of the time we won't be preempted, so do it here before we disable interrupts. */
8088 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8089 return hmR0VmxSaveHostState(pVM, pVCpu);
8090}
8091
8092
8093/**
8094 * Loads the guest state into the VMCS guest-state area. The CPU state will be
8095 * loaded from these fields on every successful VM-entry.
8096 *
8097 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
8098 * Sets up the VM-entry controls.
8099 * Sets up the appropriate VMX non-root function to execute guest code based on
8100 * the guest CPU mode.
8101 *
8102 * @returns VBox status code.
8103 * @param pVM Pointer to the VM.
8104 * @param pVCpu Pointer to the VMCPU.
8105 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8106 * out-of-sync. Make sure to update the required fields
8107 * before using them.
8108 *
8109 * @remarks No-long-jump zone!!!
8110 */
8111static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8112{
8113 AssertPtr(pVM);
8114 AssertPtr(pVCpu);
8115 AssertPtr(pMixedCtx);
8116 HMVMX_ASSERT_PREEMPT_SAFE();
8117
8118#ifdef LOG_ENABLED
8119 /** @todo r=ramshankar: I'm not able to use VMMRZCallRing3Disable() here,
8120 * probably not initialized yet? Anyway this will do for now.
8121 *
8122 * Update: Should be possible once VMXR0LoadGuestState() is removed as an
8123 * interface and disable ring-3 calls when thread-context hooks are not
8124 * available. */
8125 bool fCallerDisabledLogFlush = VMMR0IsLogFlushDisabled(pVCpu);
8126 VMMR0LogFlushDisable(pVCpu);
8127#endif
8128
8129 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8130
8131 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8132
8133 /* Determine real-on-v86 mode. */
8134 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8135 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
8136 && CPUMIsGuestInRealModeEx(pMixedCtx))
8137 {
8138 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8139 }
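 /* Without unrestricted guest execution, real-mode guest code is run as a virtual-8086 task under VT-x, using the
    real-mode TSS and interrupt redirection bitmap that HM sets up (see pRealModeTSS). */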
8140
8141 /*
8142 * Load the guest-state into the VMCS.
8143 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8144 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8145 */
8146 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8147 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8148
8149 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8150 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8151 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8152
8153 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8154 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8155 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8156
8157 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8158 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8159
8160 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8161 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8162
8163 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
8164 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8165 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8166
8167 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
8168 determine we don't have to swap EFER after all. */
8169 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8170 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8171
8172 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8173 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8174
8175 /*
8176 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
8177 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
8178 */
8179 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8180 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8181
8182 /* Clear any unused and reserved bits. */
8183 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8184
8185#ifdef LOG_ENABLED
8186 /* Only reenable log-flushing if the caller has it enabled. */
8187 if (!fCallerDisabledLogFlush)
8188 VMMR0LogFlushEnable(pVCpu);
8189#endif
8190
8191 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8192 return rc;
8193}
8194
8195
8196/**
8197 * Loads the state shared between the host and guest into the VMCS.
8198 *
8199 * @param pVM Pointer to the VM.
8200 * @param pVCpu Pointer to the VMCPU.
8201 * @param pCtx Pointer to the guest-CPU context.
8202 *
8203 * @remarks No-long-jump zone!!!
8204 */
8205static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8206{
8207 NOREF(pVM);
8208
8209 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8210 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8211
8212 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8213 {
8214 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8215 AssertRC(rc);
8216 }
8217
8218 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8219 {
8220 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8221 AssertRC(rc);
8222
8223 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8224 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8225 {
8226 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8227 AssertRC(rc);
8228 }
8229 }
8230
8231 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8232 {
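 /* The lazily-swapped MSRs (typically the SYSCALL-related MSRs and KERNEL_GS_BASE) are only relevant on 64-bit
    hosts that allow 64-bit guests; see hmR0VmxLazyLoadGuestMsrs(). */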
8233#if HC_ARCH_BITS == 64
8234 if (pVM->hm.s.fAllow64BitGuests)
8235 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8236#endif
8237 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8238 }
8239
8240 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8241 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8242}
8243
8244
8245/**
8246 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8247 *
8248 * @param pVM Pointer to the VM.
8249 * @param pVCpu Pointer to the VMCPU.
8250 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8251 * out-of-sync. Make sure to update the required fields
8252 * before using them.
8253 */
8254DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8255{
8256 HMVMX_ASSERT_PREEMPT_SAFE();
8257
8258 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8259#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8260 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8261#endif
8262
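 /* Fast path: if only the guest RIP has changed since the last VM-entry, reload just RIP; otherwise reload the
    full guest state. */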
8263 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8264 {
8265 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8266 AssertRC(rc);
8267 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8268 }
8269 else if (HMCPU_CF_VALUE(pVCpu))
8270 {
8271 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8272 AssertRC(rc);
8273 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8274 }
8275
8276 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8277 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8278 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8279 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8280}
8281
8282
8283/**
8284 * Does the preparations before executing guest code in VT-x.
8285 *
8286 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8287 * recompiler. We must be cautious about committing guest-state
8288 * information into the VMCS on the assumption that we will assuredly execute the
8289 * guest in VT-x mode. If we fall back to the recompiler after updating the VMCS
8290 * and clearing the common-state (TRPM/forceflags), we must undo those changes
8291 * so that the recompiler can (and should) use them when it resumes guest
8292 * execution. Otherwise such operations must be done when we can no longer
8293 * exit to ring-3.
8294 *
8295 * @returns Strict VBox status code.
8296 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8297 * have been disabled.
8298 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8299 * double-fault into the guest.
8300 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8301 *
8302 * @param pVM Pointer to the VM.
8303 * @param pVCpu Pointer to the VMCPU.
8304 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8305 * out-of-sync. Make sure to update the required fields
8306 * before using them.
8307 * @param pVmxTransient Pointer to the VMX transient structure.
8308 */
8309static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8310{
8311 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8312
8313#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8314 PGMRZDynMapFlushAutoSet(pVCpu);
8315#endif
8316
8317 /* Check force flag actions that might require us to go back to ring-3. */
8318 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
8319 if (rc != VINF_SUCCESS)
8320 return rc;
8321
8322#ifndef IEM_VERIFICATION_MODE_FULL
8323 /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
8324 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8325 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8326 {
8327 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8328 RTGCPHYS GCPhysApicBase;
8329 GCPhysApicBase = pMixedCtx->msrApicBase;
8330 GCPhysApicBase &= PAGE_BASE_GC_MASK;
8331
8332 /* Unalias any existing mapping. */
8333 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8334 AssertRCReturn(rc, rc);
8335
8336 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
8337 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
8338 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8339 AssertRCReturn(rc, rc);
8340
8341 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8342 }
8343#endif /* !IEM_VERIFICATION_MODE_FULL */
8344
8345 /*
8346 * Evaluate events as pending-for-injection into the guest. Toggling of force-flags here is safe as long as
8347 * we update TRPM on premature exits to ring-3 before executing guest code. We must NOT restore the force-flags.
8348 */
8349 if (TRPMHasTrap(pVCpu))
8350 {
8351 rc = hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8352 if (RT_FAILURE(rc))
8353 return rc;
8354 }
8355 else if (!pVCpu->hm.s.Event.fPending)
8356 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8357
8358 /*
8359 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
8360 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
8361 */
8362 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
8363 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8364 {
8365 //Assert(rc == VINF_EM_RESET);
8366 return rc;
8367 }
8368
8369 /*
8370 * Load the guest state bits, we can handle longjmps/getting preempted here.
8371 *
8372 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8373 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8374 * Hence, this needs to be done -after- injection of events.
8375 */
8376 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8377
8378 /*
8379 * No longjmps to ring-3 from this point on!!!
8380 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8381 * This also disables flushing of the R0-logger instance (if any).
8382 */
8383 VMMRZCallRing3Disable(pVCpu);
8384
8385 /*
8386 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
8387 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
8388 *
8389 * We need to check for force-flags that could've possible been altered since we last checked them (e.g.
8390 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
8391 *
8392 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
8393 * executing guest code.
8394 */
8395 pVmxTransient->uEflags = ASMIntDisableFlags();
8396 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8397 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8398 {
8399 hmR0VmxClearEventVmcs(pVCpu);
8400 ASMSetFlags(pVmxTransient->uEflags);
8401 VMMRZCallRing3Enable(pVCpu);
8402 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8403 return VINF_EM_RAW_TO_R3;
8404 }
8405
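 /* Likewise, if the host wants to preempt this thread, bail out to ring-3 now instead of holding off the host
    scheduler while we run guest code. */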
8406 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
8407 {
8408 hmR0VmxClearEventVmcs(pVCpu);
8409 ASMSetFlags(pVmxTransient->uEflags);
8410 VMMRZCallRing3Enable(pVCpu);
8411 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8412 return VINF_EM_RAW_INTERRUPT;
8413 }
8414
8415 /* We've injected any pending events. This is really the point of no return (to ring-3). */
8416 pVCpu->hm.s.Event.fPending = false;
8417
8418 return VINF_SUCCESS;
8419}
8420
8421
8422/**
8423 * Prepares to run guest code in VT-x and we've committed to doing so. This
8424 * means there is no backing out to ring-3 or anywhere else at this
8425 * point.
8426 *
8427 * @param pVM Pointer to the VM.
8428 * @param pVCpu Pointer to the VMCPU.
8429 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8430 * out-of-sync. Make sure to update the required fields
8431 * before using them.
8432 * @param pVmxTransient Pointer to the VMX transient structure.
8433 *
8434 * @remarks Called with preemption disabled.
8435 * @remarks No-long-jump zone!!!
8436 */
8437static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8438{
8439 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8440 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8441 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8442
8443 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8444 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8445
8446#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8447 if (!CPUMIsGuestFPUStateActive(pVCpu))
8448 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8449 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8450#endif
8451
8452 if ( pVCpu->hm.s.fUseGuestFpu
8453 && !CPUMIsGuestFPUStateActive(pVCpu))
8454 {
8455 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8456 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8457 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8458 }
8459
8460 /*
8461 * Lazy-update of the host MSRs values in the auto-load/store MSR area.
8462 */
8463 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8464 && pVCpu->hm.s.vmx.cMsrs > 0)
8465 {
8466 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8467 }
8468
8469 /*
8470 * Load the host state bits as we may've been preempted (only happens when
8471 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
8472 */
8473 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8474 {
8475 /* This ASSUMES that pfnStartVM has been set up already. */
8476 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8477 AssertRC(rc);
8478 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState);
8479 }
8480 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8481
8482 /*
8483 * Load the state shared between host and guest (FPU, debug, lazy MSRs).
8484 */
8485 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8486 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8487 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8488
8489 /* Store status of the shared guest-host state at the time of VM-entry. */
8490#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8491 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8492 {
8493 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8494 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8495 }
8496 else
8497#endif
8498 {
8499 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8500 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8501 }
8502 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8503
8504 /*
8505 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8506 */
8507 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8508 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
8509
8510 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
8511 RTCPUID idCurrentCpu = pCpu->idCpu;
8512 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8513 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8514 {
8515 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
8516 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8517 }
8518
8519 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
8520 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8521 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8522 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8523
8524 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8525
8526 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8527 to start executing. */
8528
8529 /*
8530 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8531 */
8532 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8533 {
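 /* RDTSCP returns IA32_TSC_AUX, so when the guest executes RDTSC/RDTSCP without exiting we must supply the guest's
    TSC_AUX value through the auto-load/store MSR area; otherwise it need not be swapped. */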
8534 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8535 {
8536 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8537 AssertRC(rc2);
8538 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8539 bool fMsrUpdated = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu),
8540 true /* fUpdateHostMsr */);
8541 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8542 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8543 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8544 }
8545 else
8546 {
8547 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8548 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8549 }
8550 }
8551
8552#ifdef VBOX_STRICT
8553 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8554 hmR0VmxCheckHostEferMsr(pVCpu);
8555 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8556#endif
8557#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8558 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8559 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8560 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8561#endif
8562}
8563
8564
8565/**
8566 * Performs some essential restoration of state after running guest code in
8567 * VT-x.
8568 *
8569 * @param pVM Pointer to the VM.
8570 * @param pVCpu Pointer to the VMCPU.
8571 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8572 * out-of-sync. Make sure to update the required fields
8573 * before using them.
8574 * @param pVmxTransient Pointer to the VMX transient structure.
8575 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8576 *
8577 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8578 *
8579 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8580 * unconditionally when it is safe to do so.
8581 */
8582static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8583{
8584 NOREF(pVM);
8585
8586 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8587
8588 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
8589 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
8590 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8591 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8592 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8593
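 /* Report the last TSC value the guest could have seen: the current host TSC plus the VMCS TSC offset, minus a
    rough estimate of the world-switch overhead (0x400 ticks). */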
8594 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8595 {
8596 /** @todo Find a way to fix hardcoding a guestimate. */
8597 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
8598 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
8599 }
8600
8601 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8602 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8603 Assert(!(ASMGetFlags() & X86_EFL_IF));
8604 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8605
8606#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8607 if (CPUMIsGuestFPUStateActive(pVCpu))
8608 {
8609 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8610 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8611 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8612 }
8613#endif
8614
8615#if HC_ARCH_BITS == 64
8616 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8617#endif
8618 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8619#ifdef VBOX_STRICT
8620 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8621#endif
8622 ASMSetFlags(pVmxTransient->uEflags); /* Enable interrupts. */
8623 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8624
8625 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8626 uint32_t uExitReason;
8627 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8628 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8629 AssertRC(rc);
8630 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
8631 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
8632
8633 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
8634 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
8635 {
8636 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8637 pVmxTransient->fVMEntryFailed));
8638 return;
8639 }
8640
8641 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
8642 {
8643 /* Update the guest interruptibility-state from the VMCS. */
8644 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8645
8646#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8647 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8648 AssertRC(rc);
8649#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8650 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8651 AssertRC(rc);
8652#endif
8653
8654 /*
8655 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
8656 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
8657 * why it's done here: it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
8658 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
8659 */
8660 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8661 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8662 {
8663 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8664 AssertRC(rc);
8665 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8666 }
8667 }
8668}
8669
8670
8671/**
8672 * Runs the guest code using VT-x the normal way.
8673 *
8674 * @returns VBox status code.
8675 * @param pVM Pointer to the VM.
8676 * @param pVCpu Pointer to the VMCPU.
8677 * @param pCtx Pointer to the guest-CPU context.
8678 *
8679 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8680 */
8681static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8682{
8683 VMXTRANSIENT VmxTransient;
8684 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8685 int rc = VERR_INTERNAL_ERROR_5;
8686 uint32_t cLoops = 0;
8687
8688 for (;; cLoops++)
8689 {
8690 Assert(!HMR0SuspendPending());
8691 HMVMX_ASSERT_CPU_SAFE();
8692
8693 /* Preparatory work for running guest code, this may force us to return
8694 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8695 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8696 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
8697 if (rc != VINF_SUCCESS)
8698 break;
8699
8700 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8701 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8702 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8703
8704 /* Restore any residual host-state and save any bits shared between host
8705 and guest into the guest-CPU state. Re-enables interrupts! */
8706 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8707
8708 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8709 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8710 {
8711 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8712 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8713 return rc;
8714 }
8715
8716 /* Handle the VM-exit. */
8717 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8718 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8719 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8720 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8721 HMVMX_START_EXIT_DISPATCH_PROF();
8722#ifdef HMVMX_USE_FUNCTION_TABLE
8723 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8724#else
8725 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8726#endif
8727 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8728 if (rc != VINF_SUCCESS)
8729 break;
8730 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
8731 {
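 /* We've hit the maximum number of inner-loop iterations; return to ring-3 so the outer loop can process pending work
 before the guest is resumed. */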
8732 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
8733 rc = VINF_EM_RAW_INTERRUPT;
8734 break;
8735 }
8736 }
8737
8738 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8739 return rc;
8740}
8741
8742
8743/**
8744 * Single steps guest code using VT-x.
8745 *
8746 * @returns VBox status code.
8747 * @param pVM Pointer to the VM.
8748 * @param pVCpu Pointer to the VMCPU.
8749 * @param pCtx Pointer to the guest-CPU context.
8750 *
8751 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
8752 */
8753static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8754{
8755 VMXTRANSIENT VmxTransient;
8756 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8757 int rc = VERR_INTERNAL_ERROR_5;
8758 uint32_t cLoops = 0;
8759 uint16_t uCsStart = pCtx->cs.Sel;
8760 uint64_t uRipStart = pCtx->rip;
8761
8762 for (;; cLoops++)
8763 {
8764 Assert(!HMR0SuspendPending());
8765 HMVMX_ASSERT_CPU_SAFE();
8766
8767 /* Preparatory work for running guest code, this may force us to return
8768 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8769 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8770 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
8771 if (rc != VINF_SUCCESS)
8772 break;
8773
8774 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8775 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8776 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8777
8778 /* Restore any residual host-state and save any bits shared between host
8779 and guest into the guest-CPU state. Re-enables interrupts! */
8780 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8781
8782 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8783 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8784 {
8785 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8786 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8787 return rc;
8788 }
8789
8790 /* Handle the VM-exit. */
8791 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8792 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8793 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8794 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8795 HMVMX_START_EXIT_DISPATCH_PROF();
8796#ifdef HMVMX_USE_FUNCTION_TABLE
8797 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8798#else
8799 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8800#endif
8801 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8802 if (rc != VINF_SUCCESS)
8803 break;
8804 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
8805 {
8806 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
8807 rc = VINF_EM_RAW_INTERRUPT;
8808 break;
8809 }
8810
8811 /*
8812 * Did the RIP change? If so, consider it a single step.
8813 * Otherwise, make sure one of the TFs gets set.
8814 */
8815 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
8816 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
8817 AssertRCReturn(rc2, rc2);
8818 if ( pCtx->rip != uRipStart
8819 || pCtx->cs.Sel != uCsStart)
8820 {
8821 rc = VINF_EM_DBG_STEPPED;
8822 break;
8823 }
8824 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
8825 }
8826
8827 /*
8828 * Clear the X86_EFL_TF if necessary.
8829 */
8830 if (pVCpu->hm.s.fClearTrapFlag)
8831 {
8832 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
8833 AssertRCReturn(rc2, rc2);
8834 pVCpu->hm.s.fClearTrapFlag = false;
8835 pCtx->eflags.Bits.u1TF = 0;
8836 }
8837 /** @todo there seem to be issues with the resume flag when the monitor trap
8838 * flag is pending without being used. Seen early in BIOS init when
8839 * accessing the APIC page in protected mode. */
8840
8841 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8842 return rc;
8843}
8844
8845
8846/**
8847 * Runs the guest code using VT-x.
8848 *
8849 * @returns VBox status code.
8850 * @param pVM Pointer to the VM.
8851 * @param pVCpu Pointer to the VMCPU.
8852 * @param pCtx Pointer to the guest-CPU context.
8853 */
8854VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8855{
8856 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8857 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
8858 HMVMX_ASSERT_PREEMPT_SAFE();
8859
8860 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
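 /* The notification callback is invoked if anything below longjmps back to ring-3, so that guest/host state can be
 saved/restored cleanly before the jump. */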
8861
8862 int rc;
8863 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
8864 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
8865 else
8866 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
8867
8868 if (rc == VERR_EM_INTERPRETER)
8869 rc = VINF_EM_RAW_EMULATE_INSTR;
8870 else if (rc == VINF_EM_RESET)
8871 rc = VINF_EM_TRIPLE_FAULT;
8872
8873 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
8874 if (RT_FAILURE(rc2))
8875 {
8876 pVCpu->hm.s.u32HMError = rc;
8877 rc = rc2;
8878 }
8879 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
8880 return rc;
8881}
8882
8883
8884#ifndef HMVMX_USE_FUNCTION_TABLE
8885DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
8886{
8887#ifdef DEBUG_ramshankar
8888# define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
8889# define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
8890#endif
8891 int rc;
8892 switch (rcReason)
8893 {
8894 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8895 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8896 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8897 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8898 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8899 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8900 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8901 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8902 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8903 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8904 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8905 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8906 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8907 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8908 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8909 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8910 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8911 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8912 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8913 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8914 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8915 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8916 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8917 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8918 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8919 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8920 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8921 case VMX_EXIT_WBINVD: /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8922 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8923 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8924 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8925 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8926 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8927 case VMX_EXIT_VMCALL: /* SVVMCS(); */ rc = hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8928
8929 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
8930 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
8931 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
8932 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
8933 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
8934 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
8935 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
8936 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
8937 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
8938
8939 case VMX_EXIT_VMCLEAR:
8940 case VMX_EXIT_VMLAUNCH:
8941 case VMX_EXIT_VMPTRLD:
8942 case VMX_EXIT_VMPTRST:
8943 case VMX_EXIT_VMREAD:
8944 case VMX_EXIT_VMRESUME:
8945 case VMX_EXIT_VMWRITE:
8946 case VMX_EXIT_VMXOFF:
8947 case VMX_EXIT_VMXON:
8948 case VMX_EXIT_INVEPT:
8949 case VMX_EXIT_INVVPID:
8950 case VMX_EXIT_VMFUNC:
8951 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
8952 break;
8953 default:
8954 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
8955 break;
8956 }
8957 return rc;
8958}
8959#endif
8960
8961#ifdef DEBUG
8962/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
8963# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
8964 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
8965
8966# define HMVMX_ASSERT_PREEMPT_CPUID() \
8967 do \
8968 { \
8969 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
8970 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
8971 } while (0)
8972
8973# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
8974 do { \
8975 AssertPtr(pVCpu); \
8976 AssertPtr(pMixedCtx); \
8977 AssertPtr(pVmxTransient); \
8978 Assert(pVmxTransient->fVMEntryFailed == false); \
8979 Assert(ASMIntAreEnabled()); \
8980 HMVMX_ASSERT_PREEMPT_SAFE(); \
8981 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
8982 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
8983 HMVMX_ASSERT_PREEMPT_SAFE(); \
8984 if (VMMR0IsLogFlushDisabled(pVCpu)) \
8985 HMVMX_ASSERT_PREEMPT_CPUID(); \
8986 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
8987 } while (0)
8988
8989# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
8990 do { \
8991 Log4Func(("\n")); \
8992 } while (0)
8993#else /* Release builds */
8994# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
8995 do { \
8996 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
8997 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
8998 } while (0)
8999# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
9000#endif
9001
9002
9003/**
9004 * Advances the guest RIP after reading it from the VMCS.
9005 *
9006 * @returns VBox status code.
9007 * @param pVCpu Pointer to the VMCPU.
9008 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9009 * out-of-sync. Make sure to update the required fields
9010 * before using them.
9011 * @param pVmxTransient Pointer to the VMX transient structure.
9012 *
9013 * @remarks No-long-jump zone!!!
9014 */
9015DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9016{
9017 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9018 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9019 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9020 AssertRCReturn(rc, rc);
9021
9022 pMixedCtx->rip += pVmxTransient->cbInstr;
9023 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
9024
9025 /*
9026 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
9027 * pending debug exception field as it takes care of priority of events.
9028 *
9029 * See Intel spec. 32.2.1 "Debug Exceptions".
9030 */
9031 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
9032
9033 return rc;
9034}
9035
9036
9037/**
9038 * Tries to determine what part of the guest-state VT-x has deemed as invalid
9039 * and update error record fields accordingly.
9040 *
9041 * @return VMX_IGS_* return codes.
9042 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9043 * wrong with the guest state.
9044 *
9045 * @param pVM Pointer to the VM.
9046 * @param pVCpu Pointer to the VMCPU.
9047 * @param pCtx Pointer to the guest-CPU state.
9048 *
9049 * @remarks This function assumes our cache of the VMCS controls
9050 * are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9051 */
9052static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9053{
9054#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9055#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9056 uError = (err); \
9057 break; \
9058 } else do { } while (0)
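/* Note: the 'else do { } while (0)' tail makes HMVMX_CHECK_BREAK safe to use with a trailing semicolon and inside
 unbraced if/else statements. */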
9059
9060 int rc;
9061 uint32_t uError = VMX_IGS_ERROR;
9062 uint32_t u32Val;
9063 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9064
9065 do
9066 {
9067 /*
9068 * CR0.
9069 */
9070 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9071 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
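 /* A CR0 bit that is 1 in both FIXED0 and FIXED1 must be 1 (uSetCR0); a bit that is 0 in both must be 0, i.e. only
 bits set in uZapCR0 may be 1. See Intel spec. A.7 "VMX-Fixed Bits in CR0". */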
9072 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
9073 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9074 if (fUnrestrictedGuest)
9075 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
9076
9077 uint32_t u32GuestCR0;
9078 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
9079 AssertRCBreak(rc);
9080 HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
9081 HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
9082 if ( !fUnrestrictedGuest
9083 && (u32GuestCR0 & X86_CR0_PG)
9084 && !(u32GuestCR0 & X86_CR0_PE))
9085 {
9086 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9087 }
9088
9089 /*
9090 * CR4.
9091 */
9092 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9093 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9094
9095 uint32_t u32GuestCR4;
9096 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
9097 AssertRCBreak(rc);
9098 HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
9099 HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
9100
9101 /*
9102 * IA32_DEBUGCTL MSR.
9103 */
9104 uint64_t u64Val;
9105 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9106 AssertRCBreak(rc);
9107 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9108 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9109 {
9110 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9111 }
9112 uint64_t u64DebugCtlMsr = u64Val;
9113
9114#ifdef VBOX_STRICT
9115 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9116 AssertRCBreak(rc);
9117 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
9118#endif
9119 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
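 /* The "IA-32e mode guest" VM-entry control tells us whether the guest will be in long mode after this VM-entry. */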
9120
9121 /*
9122 * RIP and RFLAGS.
9123 */
9124 uint32_t u32Eflags;
9125#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9126 if (HMVMX_IS_64BIT_HOST_MODE())
9127 {
9128 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9129 AssertRCBreak(rc);
9130 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
9131 if ( !fLongModeGuest
9132 || !pCtx->cs.Attr.n.u1Long)
9133 {
9134 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9135 }
9136 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9137 * must be identical if the "IA-32e mode guest" VM-entry
9138 * control is 1 and CS.L is 1. No check applies if the
9139 * CPU supports 64 linear-address bits. */
9140
9141 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9142 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9143 AssertRCBreak(rc);
9144 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bits 63:22, bits 15, 5, 3 MBZ. */
9145 VMX_IGS_RFLAGS_RESERVED);
9146 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9147 u32Eflags = u64Val;
9148 }
9149 else
9150#endif
9151 {
9152 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9153 AssertRCBreak(rc);
9154 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bits 31:22, bits 15, 5, 3 MBZ. */
9155 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9156 }
9157
9158 if ( fLongModeGuest
9159 || ( fUnrestrictedGuest
9160 && !(u32GuestCR0 & X86_CR0_PE)))
9161 {
9162 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9163 }
9164
9165 uint32_t u32EntryInfo;
9166 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9167 AssertRCBreak(rc);
9168 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9169 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9170 {
9171 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9172 }
9173
9174 /*
9175 * 64-bit checks.
9176 */
9177#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9178 if (HMVMX_IS_64BIT_HOST_MODE())
9179 {
9180 if ( fLongModeGuest
9181 && !fUnrestrictedGuest)
9182 {
9183 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9184 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9185 }
9186
9187 if ( !fLongModeGuest
9188 && (u32GuestCR4 & X86_CR4_PCIDE))
9189 {
9190 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9191 }
9192
9193 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9194 * 51:32 beyond the processor's physical-address width are 0. */
9195
9196 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9197 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9198 {
9199 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9200 }
9201
9202 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9203 AssertRCBreak(rc);
9204 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9205
9206 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9207 AssertRCBreak(rc);
9208 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9209 }
9210#endif
9211
9212 /*
9213 * PERF_GLOBAL MSR.
9214 */
9215 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
9216 {
9217 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9218 AssertRCBreak(rc);
9219 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9220 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9221 }
9222
9223 /*
9224 * PAT MSR.
9225 */
9226 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
9227 {
9228 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9229 AssertRCBreak(rc);
9230 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry are reserved (MBZ). */
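 /* Each of the 8 PAT entries is a 3-bit memory type; only UC (0), WC (1), WT (4), WP (5), WB (6) and UC- (7) are valid. */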
9231 for (unsigned i = 0; i < 8; i++)
9232 {
9233 uint8_t u8Val = (u64Val & 0x7);
9234 if ( u8Val != 0 /* UC */
9235 && u8Val != 1 /* WC */
9236 && u8Val != 4 /* WT */
9237 && u8Val != 5 /* WP */
9238 && u8Val != 6 /* WB */
9239 && u8Val != 7 /* UC- */)
9240 {
9241 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9242 }
9243 u64Val >>= 3;
9244 }
9245 }
9246
9247 /*
9248 * EFER MSR.
9249 */
9250 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
9251 {
9252 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9253 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9254 AssertRCBreak(rc);
9255 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9256 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9257 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
9258 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9259 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9260 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u32GuestCR0 & X86_CR0_PG),
9261 VMX_IGS_EFER_LMA_PG_MISMATCH);
9262 }
9263
9264 /*
9265 * Segment registers.
9266 */
9267 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9268 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9269 if (!(u32Eflags & X86_EFL_VM))
9270 {
9271 /* CS */
9272 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9273 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9274 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9275 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9276 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9277 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9278 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9279 /* CS cannot be loaded with NULL in protected mode. */
9280 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9281 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9282 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9283 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9284 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9285 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9286 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9287 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9288 else
9289 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9290
9291 /* SS */
9292 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9293 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9294 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9295 if ( !(pCtx->cr0 & X86_CR0_PE)
9296 || pCtx->cs.Attr.n.u4Type == 3)
9297 {
9298 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9299 }
9300 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9301 {
9302 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9303 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9304 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9305 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9306 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9307 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9308 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9309 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9310 }
9311
9312 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
9313 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9314 {
9315 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9316 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9317 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9318 || pCtx->ds.Attr.n.u4Type > 11
9319 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9320 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9321 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9322 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9323 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9324 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9325 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9326 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9327 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9328 }
9329 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9330 {
9331 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9332 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9333 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9334 || pCtx->es.Attr.n.u4Type > 11
9335 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9336 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9337 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9338 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9339 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9340 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9341 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9342 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9343 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9344 }
9345 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9346 {
9347 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9348 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9349 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9350 || pCtx->fs.Attr.n.u4Type > 11
9351 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9352 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9353 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9354 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9355 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9356 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9357 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9358 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9359 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9360 }
9361 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9362 {
9363 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9364 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9365 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9366 || pCtx->gs.Attr.n.u4Type > 11
9367 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9368 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9369 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9370 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9371 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9372 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9373 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9374 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9375 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9376 }
9377 /* 64-bit capable CPUs. */
9378#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9379 if (HMVMX_IS_64BIT_HOST_MODE())
9380 {
9381 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9382 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9383 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9384 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9385 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9386 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9387 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9388 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9389 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9390 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9391 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9392 }
9393#endif
9394 }
9395 else
9396 {
9397 /* V86 mode checks. */
9398 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
9399 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9400 {
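 /* In real-on-v86 mode the VMCS holds the attributes VT-x mandates for virtual-8086 mode rather than what is in
 pCtx: 0xf3 = present, DPL=3, S=1, type=3 (read/write data, accessed). */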
9401 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9402 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9403 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9404 }
9405 else
9406 {
9407 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9408 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9409 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9410 }
9411
9412 /* CS */
9413 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9414 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9415 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9416 /* SS */
9417 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9418 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9419 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9420 /* DS */
9421 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9422 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9423 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9424 /* ES */
9425 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9426 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9427 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9428 /* FS */
9429 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9430 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9431 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9432 /* GS */
9433 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9434 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9435 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9436 /* 64-bit capable CPUs. */
9437#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9438 if (HMVMX_IS_64BIT_HOST_MODE())
9439 {
9440 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9441 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9442 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9443 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9444 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9445 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9446 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9447 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9448 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9449 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9450 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9451 }
9452#endif
9453 }
9454
9455 /*
9456 * TR.
9457 */
9458 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9459 /* 64-bit capable CPUs. */
9460#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9461 if (HMVMX_IS_64BIT_HOST_MODE())
9462 {
9463 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9464 }
9465#endif
9466 if (fLongModeGuest)
9467 {
9468 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9469 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9470 }
9471 else
9472 {
9473 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9474 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9475 VMX_IGS_TR_ATTR_TYPE_INVALID);
9476 }
9477 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9478 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9479 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9480 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9481 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9482 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9483 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9484 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9485
9486 /*
9487 * GDTR and IDTR.
9488 */
9489#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9490 if (HMVMX_IS_64BIT_HOST_MODE())
9491 {
9492 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9493 AssertRCBreak(rc);
9494 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9495
9496 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9497 AssertRCBreak(rc);
9498 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9499 }
9500#endif
9501
9502 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9503 AssertRCBreak(rc);
9504 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9505
9506 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9507 AssertRCBreak(rc);
9508 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9509
9510 /*
9511 * Guest Non-Register State.
9512 */
9513 /* Activity State. */
9514 uint32_t u32ActivityState;
9515 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9516 AssertRCBreak(rc);
9517 HMVMX_CHECK_BREAK( !u32ActivityState
9518 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
9519 VMX_IGS_ACTIVITY_STATE_INVALID);
9520 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9521 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9522 uint32_t u32IntrState;
9523 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
9524 AssertRCBreak(rc);
9525 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
9526 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9527 {
9528 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9529 }
9530
9531 /** @todo Activity state and injecting interrupts. Left as a todo since we
9532 * currently don't use any activity state other than ACTIVE. */
9533
9534 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9535 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9536
9537 /* Guest interruptibility-state. */
9538 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9539 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9540 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
9541 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9542 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9543 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9544 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9545 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9546 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9547 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
9548 {
9549 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9550 {
9551 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9552 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9553 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9554 }
9555 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9556 {
9557 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9558 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9559 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9560 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9561 }
9562 }
9563 /** @todo Assumes the processor is not in SMM. */
9564 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9565 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9566 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9567 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9568 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9569 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
9570 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9571 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9572 {
9573 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
9574 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9575 }
9576
9577 /* Pending debug exceptions. */
9578 if (HMVMX_IS_64BIT_HOST_MODE())
9579 {
9580 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
9581 AssertRCBreak(rc);
9582 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9583 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9584 u32Val = u64Val; /* For pending debug exceptions checks below. */
9585 }
9586 else
9587 {
9588 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
9589 AssertRCBreak(rc);
9590 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9591 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
9592 }
9593
9594 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9595 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
9596 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9597 {
9598 if ( (u32Eflags & X86_EFL_TF)
9599 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9600 {
9601 /* Bit 14 is PendingDebug.BS. */
9602 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9603 }
9604 if ( !(u32Eflags & X86_EFL_TF)
9605 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9606 {
9607 /* Bit 14 is PendingDebug.BS. */
9608 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9609 }
9610 }
9611
9612 /* VMCS link pointer. */
9613 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
9614 AssertRCBreak(rc);
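 /* A value of all 1s means the VMCS link pointer is not in use; the checks below only apply otherwise. */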
9615 if (u64Val != UINT64_C(0xffffffffffffffff))
9616 {
9617 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
9618 /** @todo Bits beyond the processor's physical-address width MBZ. */
9619 /** @todo The 32 bits located in memory referenced by the value of this field (as a
9620 * physical address) must contain the processor's VMCS revision ID. */
9621 /** @todo SMM checks. */
9622 }
9623
9624 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
9625 * not using Nested Paging? */
9626 if ( pVM->hm.s.fNestedPaging
9627 && !fLongModeGuest
9628 && CPUMIsGuestInPAEModeEx(pCtx))
9629 {
9630 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
9631 AssertRCBreak(rc);
9632 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9633
9634 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
9635 AssertRCBreak(rc);
9636 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9637
9638 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
9639 AssertRCBreak(rc);
9640 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9641
9642 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
9643 AssertRCBreak(rc);
9644 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9645 }
9646
9647 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
9648 if (uError == VMX_IGS_ERROR)
9649 uError = VMX_IGS_REASON_NOT_FOUND;
9650 } while (0);
9651
9652 pVCpu->hm.s.u32HMError = uError;
9653 return uError;
9654
9655#undef HMVMX_ERROR_BREAK
9656#undef HMVMX_CHECK_BREAK
9657}
9658
9659/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9660/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
9661/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9662
9663/** @name VM-exit handlers.
9664 * @{
9665 */
9666
9667/**
9668 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
9669 */
9670HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9671{
9672 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9673 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
9674 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
9675 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
9676 return VINF_SUCCESS;
9677 return VINF_EM_RAW_INTERRUPT;
9678}
9679
9680
9681/**
9682 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9683 */
9684HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9685{
9686 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9687 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
9688
9689 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9690 AssertRCReturn(rc, rc);
9691
9692 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9693 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
9694 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
9695 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
9696
9697 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9698 {
9699 /*
9700 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves, and
9701 * anything we inject is not going to cause a VM-exit directly for the event being injected.
9702 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
9703 *
9704 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
9705 */
9706 VMXDispatchHostNmi();
9707 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
9708 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9709 return VINF_SUCCESS;
9710 }
9711
9712 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9713 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9714 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9715 {
9716 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9717 return VINF_SUCCESS;
9718 }
9719 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9720 {
9721 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9722 return rc;
9723 }
9724
9725 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
9726 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
9727 switch (uIntType)
9728 {
9729 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
9730 Assert(uVector == X86_XCPT_DB);
9731 /* no break */
9732 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
9733 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
9734 /* no break */
9735 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
9736 {
9737 switch (uVector)
9738 {
9739 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
9740 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
9741 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
9742 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
9743 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
9744 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
9745#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9746 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
9747 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9748 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
9749 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9750 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
9751 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9752 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
9753 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9754 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
9755 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9756 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
9757 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9758#endif
9759 default:
9760 {
9761 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9762 AssertRCReturn(rc, rc);
9763
9764 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
9765 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9766 {
9767 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
9768 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
9769 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
9770
9771 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9772 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9773 AssertRCReturn(rc, rc);
9774 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
9775 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
9776 0 /* GCPtrFaultAddress */);
9777 AssertRCReturn(rc, rc);
9778 }
9779 else
9780 {
9781 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
9782 pVCpu->hm.s.u32HMError = uVector;
9783 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
9784 }
9785 break;
9786 }
9787 }
9788 break;
9789 }
9790
9791 default:
9792 {
9793 pVCpu->hm.s.u32HMError = uExitIntInfo;
9794 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9795 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
9796 break;
9797 }
9798 }
9799 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9800 return rc;
9801}
9802
9803
9804/**
9805 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9806 */
9807HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9808{
9809 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9810
9811 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts, it is now ready. */
9812 hmR0VmxClearIntWindowExitVmcs(pVCpu);
9813
9814 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
9815 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
9816 return VINF_SUCCESS;
9817}
9818
9819
9820/**
9821 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9822 */
9823HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9824{
9825 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9826 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
9827 HMVMX_RETURN_UNEXPECTED_EXIT();
9828}
9829
9830
9831/**
9832 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
9833 */
9834HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9835{
9836 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9837 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
9838 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9839}
9840
9841
9842/**
9843 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
9844 */
9845HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9846{
9847 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9848 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
9849 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9850}
9851
9852
9853/**
9854 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
9855 */
9856HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9857{
9858 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9859 PVM pVM = pVCpu->CTX_SUFF(pVM);
9860 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9861 if (RT_LIKELY(rc == VINF_SUCCESS))
9862 {
9863 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9864 Assert(pVmxTransient->cbInstr == 2);
9865 }
9866 else
9867 {
9868 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
9869 rc = VERR_EM_INTERPRETER;
9870 }
9871 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
9872 return rc;
9873}
9874
9875
9876/**
9877 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
9878 */
9879HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9880{
9881 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9882 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
9883 AssertRCReturn(rc, rc);
9884
9885 if (pMixedCtx->cr4 & X86_CR4_SMXE)
9886 return VINF_EM_RAW_EMULATE_INSTR;
9887
9888 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
9889 HMVMX_RETURN_UNEXPECTED_EXIT();
9890}
9891
9892
9893/**
9894 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
9895 */
9896HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9897{
9898 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9899 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
9900 AssertRCReturn(rc, rc);
9901
9902 PVM pVM = pVCpu->CTX_SUFF(pVM);
9903 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9904 if (RT_LIKELY(rc == VINF_SUCCESS))
9905 {
9906 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9907 Assert(pVmxTransient->cbInstr == 2);
9908 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
9909 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
9910 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9911 }
9912 else
9913 {
9914 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
9915 rc = VERR_EM_INTERPRETER;
9916 }
9917 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
9918 return rc;
9919}
9920
9921
9922/**
9923 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
9924 */
9925HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9926{
9927 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9928 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
9929 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
9930 AssertRCReturn(rc, rc);
9931
9932 PVM pVM = pVCpu->CTX_SUFF(pVM);
9933 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
9934 if (RT_LIKELY(rc == VINF_SUCCESS))
9935 {
9936 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9937 Assert(pVmxTransient->cbInstr == 3);
9938 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
9939 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
9940 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9941 }
9942 else
9943 {
9944 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
9945 rc = VERR_EM_INTERPRETER;
9946 }
9947 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
9948 return rc;
9949}
9950
9951
9952/**
9953 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
9954 */
9955HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9956{
9957 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9958 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
9959 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
9960 AssertRCReturn(rc, rc);
9961
9962 PVM pVM = pVCpu->CTX_SUFF(pVM);
9963 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9964 if (RT_LIKELY(rc == VINF_SUCCESS))
9965 {
9966 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9967 Assert(pVmxTransient->cbInstr == 2);
9968 }
9969 else
9970 {
9971 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
9972 rc = VERR_EM_INTERPRETER;
9973 }
9974 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
9975 return rc;
9976}
9977
9978
9979/**
9980 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
9981 */
9982HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9983{
9984 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9985
9986 int rc = VERR_NOT_SUPPORTED;
9987 if (GIMAreHypercallsEnabled(pVCpu))
9988 {
9989 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9990 AssertRCReturn(rc, rc);
9991
9992 rc = GIMHypercall(pVCpu, pMixedCtx);
9993 }
9994 if (rc != VINF_SUCCESS)
9995 {
9996 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
9997 rc = VINF_SUCCESS;
9998 }
9999
10000 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
10001 return rc;
10002}
10003
10004
10005/**
10006 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10007 */
10008HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10009{
10010 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10011 PVM pVM = pVCpu->CTX_SUFF(pVM);
10012 Assert(!pVM->hm.s.fNestedPaging);
10013
10014 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10015 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10016 AssertRCReturn(rc, rc);
10017
10018 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
10019 rc = VBOXSTRICTRC_VAL(rc2);
10020 if (RT_LIKELY(rc == VINF_SUCCESS))
10021 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10022 else
10023 {
10024 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
10025 pVmxTransient->uExitQualification, rc));
10026 }
10027 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
10028 return rc;
10029}
10030
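/*
 * Illustrative sketch only (not built, hence the #if 0): the guest-side INVLPG this handler
 * emulates. Hypothetical ring-0 guest snippet assuming GCC-style inline assembly; the linear
 * address operand is what shows up in the exit qualification passed to EMInterpretInvlpg() above.
 */
#if 0
static inline void guestInvlpg(void const *pvLinearAddr)
{
    __asm__ __volatile__("invlpg %0" : : "m" (*(uint8_t const *)pvLinearAddr) : "memory");
}
#endif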
10031
10032/**
10033 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10034 */
10035HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10036{
10037 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10038 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10039 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10040 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10041 AssertRCReturn(rc, rc);
10042
10043 PVM pVM = pVCpu->CTX_SUFF(pVM);
10044 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10045 if (RT_LIKELY(rc == VINF_SUCCESS))
10046 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10047 else
10048 {
10049 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
10050 rc = VERR_EM_INTERPRETER;
10051 }
10052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
10053 return rc;
10054}
10055
10056
10057/**
10058 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10059 */
10060HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10061{
10062 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10063 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10064 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10065 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10066 AssertRCReturn(rc, rc);
10067
10068 PVM pVM = pVCpu->CTX_SUFF(pVM);
10069 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10070 rc = VBOXSTRICTRC_VAL(rc2);
10071 if (RT_LIKELY( rc == VINF_SUCCESS
10072 || rc == VINF_EM_HALT))
10073 {
10074 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10075 AssertRCReturn(rc3, rc3);
10076
10077 if ( rc == VINF_EM_HALT
10078 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
10079 {
10080 rc = VINF_SUCCESS;
10081 }
10082 }
10083 else
10084 {
10085 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
10086 rc = VERR_EM_INTERPRETER;
10087 }
10088 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
10089 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
10090 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
10091 return rc;
10092}
10093
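/*
 * Illustrative sketch only (not built, hence the #if 0): the guest-side MONITOR/MWAIT pair the two
 * handlers above emulate. Hypothetical ring-0 guest snippet assuming GCC-style inline assembly;
 * MONITOR takes the linear address in RAX/EAX with ECX/EDX as extensions/hints, while MWAIT takes
 * hints in EAX and extensions in ECX.
 */
#if 0
static inline void guestMonitorMwait(void const *pvMonitorAddr)
{
    __asm__ __volatile__("monitor" : : "a" (pvMonitorAddr), "c" (0), "d" (0));
    __asm__ __volatile__("mwait"   : : "a" (0), "c" (0));
}
#endif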
10094
10095/**
10096 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
10097 */
10098HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10099{
10100 /*
10101 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
10102 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
10103 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
10104 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
10105 */
10106 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10107 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10108 HMVMX_RETURN_UNEXPECTED_EXIT();
10109}
10110
10111
10112/**
10113 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
10114 */
10115HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10116{
10117 /*
10118 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
10119 * root operation. Only an STM (SMM transfer monitor) would get this exit when we (the executive monitor) execute a VMCALL
10120 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
10121 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits".
10122 */
10123 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10124 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10125 HMVMX_RETURN_UNEXPECTED_EXIT();
10126}
10127
10128
10129/**
10130 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
10131 */
10132HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10133{
10134 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
10135 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10136 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10137 HMVMX_RETURN_UNEXPECTED_EXIT();
10138}
10139
10140
10141/**
10142 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
10143 */
10144HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10145{
10146 /*
10147 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
10148 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
10149 * See Intel spec. 25.3 "Other Causes of VM-exits".
10150 */
10151 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10152 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10153 HMVMX_RETURN_UNEXPECTED_EXIT();
10154}
10155
10156
10157/**
10158 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
10159 * VM-exit.
10160 */
10161HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10162{
10163 /*
10164 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
10165 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
10166 *
10167 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
10168 * See Intel spec. "23.8 Restrictions on VMX operation".
10169 */
10170 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10171 return VINF_SUCCESS;
10172}
10173
10174
10175/**
10176 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
10177 * VM-exit.
10178 */
10179HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10180{
10181 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10182 return VINF_EM_RESET;
10183}
10184
10185
10186/**
10187 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10188 */
10189HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10190{
10191 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10192 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
10193 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10194 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10195 AssertRCReturn(rc, rc);
10196
10197 pMixedCtx->rip++;
10198 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10199 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
10200 rc = VINF_SUCCESS;
10201 else
10202 rc = VINF_EM_HALT;
10203
10204 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10205 return rc;
10206}
10207
10208
10209/**
10210 * VM-exit handler for instructions that result in a #UD exception delivered to
10211 * the guest.
10212 */
10213HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10214{
10215 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10216 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10217 return VINF_SUCCESS;
10218}
10219
10220
10221/**
10222 * VM-exit handler for expiry of the VMX preemption timer.
10223 */
10224HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10225{
10226 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10227
10228 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
10229 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10230
10231 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
10232 PVM pVM = pVCpu->CTX_SUFF(pVM);
10233 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
10234 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
10235 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
10236}
10237
10238
10239/**
10240 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
10241 */
10242HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10243{
10244 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10245
10246 /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
10247 /** @todo check if XSETBV is supported by the recompiler. */
10248 return VERR_EM_INTERPRETER;
10249}
10250
10251
10252/**
10253 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10254 */
10255HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10256{
10257 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10258
10259 /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
10260 /** @todo implement EMInterpretInvpcid() */
10261 return VERR_EM_INTERPRETER;
10262}
10263
10264
10265/**
10266 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
10267 * Error VM-exit.
10268 */
10269HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10270{
10271 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10272 AssertRCReturn(rc, rc);
10273
10274 rc = hmR0VmxCheckVmcsCtls(pVCpu);
10275 AssertRCReturn(rc, rc);
10276
10277 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10278 NOREF(uInvalidReason);
10279
10280#ifdef VBOX_STRICT
10281 uint32_t uIntrState;
10282 HMVMXHCUINTREG uHCReg;
10283 uint64_t u64Val;
10284 uint32_t u32Val;
10285
10286 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
10287 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
10288 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
10289 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10290 AssertRCReturn(rc, rc);
10291
10292 Log4(("uInvalidReason %u\n", uInvalidReason));
10293 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
10294 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
10295 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
10296 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
10297
10298 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
10299 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
10300 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
10301 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
10302 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
10303 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
10304 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
10305 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
10306 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
10307 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
10308 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
10309 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
10310#else
10311 NOREF(pVmxTransient);
10312#endif
10313
10314 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10315 return VERR_VMX_INVALID_GUEST_STATE;
10316}
10317
10318
10319/**
10320 * VM-exit handler for VM-entry failure due to an MSR-load
10321 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
10322 */
10323HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10324{
10325 NOREF(pVmxTransient);
10326 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10327 HMVMX_RETURN_UNEXPECTED_EXIT();
10328}
10329
10330
10331/**
10332 * VM-exit handler for VM-entry failure due to a machine-check event
10333 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
10334 */
10335HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10336{
10337 NOREF(pVmxTransient);
10338 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10339 HMVMX_RETURN_UNEXPECTED_EXIT();
10340}
10341
10342
10343/**
10344 * VM-exit handler for all undefined reasons. Should never ever happen... in
10345 * theory.
10346 */
10347HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10348{
10349 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
10350 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
10351 return VERR_VMX_UNDEFINED_EXIT_CODE;
10352}
10353
10354
10355/**
10356 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
10357 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
10358 * Conditional VM-exit.
10359 */
10360HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10361{
10362 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10363
10364 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
10365 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
10366 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
10367 return VERR_EM_INTERPRETER;
10368 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10369 HMVMX_RETURN_UNEXPECTED_EXIT();
10370}
10371
10372
10373/**
10374 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
10375 */
10376HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10377{
10378 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10379
10380 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
10381 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
10382 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
10383 return VERR_EM_INTERPRETER;
10384 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10385 HMVMX_RETURN_UNEXPECTED_EXIT();
10386}
10387
10388
10389/**
10390 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10391 */
10392HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10393{
10394 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10395
10396 /* EMInterpretRdmsr() requires CR0, EFLAGS and SS segment register. */
10397 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10398 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10399 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10400 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10401 {
10402 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10403 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10404 }
10405 AssertRCReturn(rc, rc);
10406 Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx));
10407
10408#ifdef VBOX_STRICT
10409 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
10410 {
10411 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
10412 && pMixedCtx->ecx != MSR_K6_EFER)
10413 {
10414 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10415 HMVMX_RETURN_UNEXPECTED_EXIT();
10416 }
10417# if HC_ARCH_BITS == 64
10418 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
10419 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10420 {
10421 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10422 HMVMX_RETURN_UNEXPECTED_EXIT();
10423 }
10424# endif
10425 }
10426#endif
10427
10428 PVM pVM = pVCpu->CTX_SUFF(pVM);
10429 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10430 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
10431 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
10432 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
10433 if (RT_LIKELY(rc == VINF_SUCCESS))
10434 {
10435 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10436 Assert(pVmxTransient->cbInstr == 2);
10437 }
10438 return rc;
10439}
10440
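/*
 * Illustrative sketch only (not built, hence the #if 0): the guest-side RDMSR the handler above
 * emulates. Hypothetical ring-0 guest snippet assuming GCC-style inline assembly; ECX selects the
 * MSR, the result comes back in EDX:EAX, and the instruction is 2 bytes long (matching the
 * cbInstr assertion above).
 */
#if 0
static inline uint64_t guestRdmsr(uint32_t idMsr)
{
    uint32_t uLo, uHi;
    __asm__ __volatile__("rdmsr" : "=a" (uLo), "=d" (uHi) : "c" (idMsr));
    return ((uint64_t)uHi << 32) | uLo;
}
#endif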
10441
10442/**
10443 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10444 */
10445HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10446{
10447 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10448 PVM pVM = pVCpu->CTX_SUFF(pVM);
10449 int rc = VINF_SUCCESS;
10450
10451 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
10452 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10453 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10454 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10455 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10456 {
10457 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10458 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10459 }
10460 AssertRCReturn(rc, rc);
10461 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
10462
10463 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10464 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
10465 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
10466
10467 if (RT_LIKELY(rc == VINF_SUCCESS))
10468 {
10469 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10470
10471 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
10472 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
10473 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
10474 {
10475 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
10476 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
10477 * EMInterpretWrmsr() changes it. */
10478 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10479 }
10480 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
10481 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10482 else if (pMixedCtx->ecx == MSR_K6_EFER)
10483 {
10484 /*
10485 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
10486 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
10487 * the other bits as well, SCE and NXE. See @bugref{7368}.
10488 */
10489 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
10490 }
10491
10492 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
10493 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10494 {
10495 switch (pMixedCtx->ecx)
10496 {
10497 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
10498 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10499 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10500 case MSR_K8_FS_BASE: /* no break */
10501 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10502 case MSR_K6_EFER: /* already handled above */ break;
10503 default:
10504 {
10505 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10506 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10507#if HC_ARCH_BITS == 64
10508 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10509 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10510#endif
10511 break;
10512 }
10513 }
10514 }
10515#ifdef VBOX_STRICT
10516 else
10517 {
10518 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
10519 switch (pMixedCtx->ecx)
10520 {
10521 case MSR_IA32_SYSENTER_CS:
10522 case MSR_IA32_SYSENTER_EIP:
10523 case MSR_IA32_SYSENTER_ESP:
10524 case MSR_K8_FS_BASE:
10525 case MSR_K8_GS_BASE:
10526 {
10527 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10528 HMVMX_RETURN_UNEXPECTED_EXIT();
10529 }
10530
10531 /* Writes to MSRs in the auto-load/store area or to swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
10532 default:
10533 {
10534 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10535 {
10536 /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
10537 if (pMixedCtx->ecx != MSR_K6_EFER)
10538 {
10539 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
10540 pMixedCtx->ecx));
10541 HMVMX_RETURN_UNEXPECTED_EXIT();
10542 }
10543 }
10544
10545#if HC_ARCH_BITS == 64
10546 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10547 {
10548 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10549 HMVMX_RETURN_UNEXPECTED_EXIT();
10550 }
10551#endif
10552 break;
10553 }
10554 }
10555 }
10556#endif /* VBOX_STRICT */
10557 }
10558 return rc;
10559}
10560
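/*
 * Illustrative sketch only (not built, hence the #if 0): the guest-side WRMSR the handler above
 * emulates. Hypothetical ring-0 guest snippet assuming GCC-style inline assembly; ECX selects the
 * MSR and EDX:EAX carries the 64-bit value. Writes in the x2APIC MSR range or to EFER take the
 * corresponding special-case branches in the handler above.
 */
#if 0
static inline void guestWrmsr(uint32_t idMsr, uint64_t uValue)
{
    __asm__ __volatile__("wrmsr" : : "c" (idMsr), "a" ((uint32_t)uValue), "d" ((uint32_t)(uValue >> 32)));
}
#endif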
10561
10562/**
10563 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10564 */
10565HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10566{
10567 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10568
10569 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
10570 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
10571 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
10572 return VERR_EM_INTERPRETER;
10573 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10574 HMVMX_RETURN_UNEXPECTED_EXIT();
10575}
10576
10577
10578/**
10579 * VM-exit handler for when the TPR value is lowered below the specified
10580 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10581 */
10582HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10583{
10584 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10585 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
10586
10587 /*
10588 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
10589 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
10590 * resume guest execution.
10591 */
10592 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10593 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
10594 return VINF_SUCCESS;
10595}
10596
10597
10598/**
10599 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
10600 * VM-exit.
10601 *
10602 * @retval VINF_SUCCESS when guest execution can continue.
10603 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
10604 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
10605 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
10606 * recompiler.
10607 */
10608HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10609{
10610 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10611 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
10612 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10613 AssertRCReturn(rc, rc);
10614
10615 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
10616 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
10617 PVM pVM = pVCpu->CTX_SUFF(pVM);
10618 switch (uAccessType)
10619 {
10620 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
10621 {
10622#if 0
10623 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
10624 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10625#else
10626 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
10627 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10628 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10629#endif
10630 AssertRCReturn(rc, rc);
10631
10632 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
10633 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
10634 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
10635 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
10636
10637 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
10638 {
10639 case 0: /* CR0 */
10640 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10641 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
10642 break;
10643 case 2: /* CR2 */
10644 /* Nothing to do here; CR2 is not part of the VMCS. */
10645 break;
10646 case 3: /* CR3 */
10647 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
10648 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
10649 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
10650 break;
10651 case 4: /* CR4 */
10652 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
10653 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
10654 break;
10655 case 8: /* CR8 */
10656 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10657 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
10658 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10659 break;
10660 default:
10661 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
10662 break;
10663 }
10664
10665 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10666 break;
10667 }
10668
10669 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
10670 {
10671 /* EMInterpretCRxRead() requires EFER MSR, CS. */
10672 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10673 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10674 AssertRCReturn(rc, rc);
10675 Assert( !pVM->hm.s.fNestedPaging
10676 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
10677 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
10678
10679 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
10680 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
10681 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10682
10683 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
10684 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
10685 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
10686 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
10687 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10688 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
10689 break;
10690 }
10691
10692 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
10693 {
10694 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10695 AssertRCReturn(rc, rc);
10696 rc = EMInterpretCLTS(pVM, pVCpu);
10697 AssertRCReturn(rc, rc);
10698 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10699 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
10700 Log4(("CRX CLTS write rc=%d\n", rc));
10701 break;
10702 }
10703
10704 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10705 {
10706 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10707 AssertRCReturn(rc, rc);
10708 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
10709 if (RT_LIKELY(rc == VINF_SUCCESS))
10710 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10711 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
10712 Log4(("CRX LMSW write rc=%d\n", rc));
10713 break;
10714 }
10715
10716 default:
10717 {
10718 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
10719 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
10720 }
10721 }
10722
10723 /* Validate possible error codes. */
10724 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
10725 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
10726 if (RT_SUCCESS(rc))
10727 {
10728 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10729 AssertRCReturn(rc2, rc2);
10730 }
10731
10732 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
10733 return rc;
10734}
10735
10736
10737/**
10738 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
10739 * VM-exit.
10740 */
10741HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10742{
10743 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10744 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
10745
10746 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10747 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10748 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10749 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
10750 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
10751 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
10752 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
10753 AssertRCReturn(rc2, rc2);
10754
10755 /* Refer Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
10756 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
10757 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
10758 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
10759 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
10760 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
10761 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
10762 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
10763
10764 /* I/O operation lookup arrays. */
10765 static const uint32_t s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
10766 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
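    /* The exit-qualification width encoding is 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is not
       defined (rejected by the AssertReturn above), hence the zero entries in both tables. */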
10767
10768 VBOXSTRICTRC rcStrict;
10769 const uint32_t cbValue = s_aIOSizes[uIOWidth];
10770 const uint32_t cbInstr = pVmxTransient->cbInstr;
10771 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
10772 PVM pVM = pVCpu->CTX_SUFF(pVM);
10773 if (fIOString)
10774 {
10775#if 0 /* Not yet ready. IEM gurus with debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158} */
10776 /*
10777 * INS/OUTS - I/O String instruction.
10778 *
10779 * Use instruction-information if available, otherwise fall back on
10780 * interpreting the instruction.
10781 */
10782 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
10783 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
10784 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
10785 {
10786 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
10787 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
10788 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10789 AssertRCReturn(rc2, rc2);
10790 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
10791 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
10792 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
10793 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
10794 if (fIOWrite)
10795 {
10796 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
10797 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
10798 }
10799 else
10800 {
10801 /*
10802 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
10803 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
10804 * See Intel Instruction spec. for "INS".
10805 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
10806 */
10807 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
10808 }
10809 }
10810 else
10811 {
10812 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
10813 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10814 AssertRCReturn(rc2, rc2);
10815 rcStrict = IEMExecOne(pVCpu);
10816 }
10817 /** @todo IEM needs to be setting these flags somehow. */
10818 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10819 fUpdateRipAlready = true;
10820#else
10821 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
10822 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
10823 if (RT_SUCCESS(rcStrict))
10824 {
10825 if (fIOWrite)
10826 {
10827 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
10828 (DISCPUMODE)pDis->uAddrMode, cbValue);
10829 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
10830 }
10831 else
10832 {
10833 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
10834 (DISCPUMODE)pDis->uAddrMode, cbValue);
10835 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
10836 }
10837 }
10838 else
10839 {
10840 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
10841 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
10842 }
10843#endif
10844 }
10845 else
10846 {
10847 /*
10848 * IN/OUT - I/O instruction.
10849 */
10850 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
10851 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
10852 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
10853 if (fIOWrite)
10854 {
10855 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
10856 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
10857 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
10858 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
10859 }
10860 else
10861 {
10862 uint32_t u32Result = 0;
10863 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
10864 if (IOM_SUCCESS(rcStrict))
10865 {
10866 /* Save result of I/O IN instr. in AL/AX/EAX. */
10867 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
10868 }
10869 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
10870 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
10871 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
10872 }
10873 }
10874
10875 if (IOM_SUCCESS(rcStrict))
10876 {
10877 if (!fUpdateRipAlready)
10878 {
10879 pMixedCtx->rip += cbInstr;
10880 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10881 }
10882
10883 /*
10884 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru meditation while booting a Fedora 17 64-bit guest.
10885 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
10886 */
10887 if (fIOString)
10888 {
10889 /** @todo Single-step for INS/OUTS with REP prefix? */
10890 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
10891 }
10892 else if (fStepping)
10893 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
10894
10895 /*
10896 * If any I/O breakpoints are armed, we need to check if one triggered
10897 * and take appropriate action.
10898 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
10899 */
10900 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
10901 AssertRCReturn(rc2, rc2);
10902
10903 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
10904 * execution engines about whether hyper BPs and such are pending. */
10905 uint32_t const uDr7 = pMixedCtx->dr[7];
10906 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
10907 && X86_DR7_ANY_RW_IO(uDr7)
10908 && (pMixedCtx->cr4 & X86_CR4_DE))
10909 || DBGFBpIsHwIoArmed(pVM)))
10910 {
10911 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
10912
10913 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
10914 VMMRZCallRing3Disable(pVCpu);
10915 HM_DISABLE_PREEMPT_IF_NEEDED();
10916
10917 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
10918
10919 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
10920 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
10921 {
10922 /* Raise #DB. */
10923 if (fIsGuestDbgActive)
10924 ASMSetDR6(pMixedCtx->dr[6]);
10925 if (pMixedCtx->dr[7] != uDr7)
10926 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
10927
10928 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
10929 }
10930 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
10931 else if ( rcStrict2 != VINF_SUCCESS
10932 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
10933 rcStrict = rcStrict2;
10934
10935 HM_RESTORE_PREEMPT_IF_NEEDED();
10936 VMMRZCallRing3Enable(pVCpu);
10937 }
10938 }
10939
10940#ifdef DEBUG
10941 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
10942 Assert(!fIOWrite);
10943 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
10944 Assert(fIOWrite);
10945 else
10946 {
10947 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
10948 * statuses, that the VMM device and some others may return. See
10949 * IOM_SUCCESS() for guidance. */
10950 AssertMsg( RT_FAILURE(rcStrict)
10951 || rcStrict == VINF_SUCCESS
10952 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
10953 || rcStrict == VINF_EM_DBG_BREAKPOINT
10954 || rcStrict == VINF_EM_RAW_GUEST_TRAP
10955 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10956 }
10957#endif
10958
10959 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
10960 return VBOXSTRICTRC_TODO(rcStrict);
10961}
10962
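/*
 * Illustrative sketch only (not built, hence the #if 0): the simple (non-string) guest-side IN/OUT
 * forms handled above. Hypothetical ring-0 guest snippet assuming GCC-style inline assembly; the
 * port lives in DX (or an 8-bit immediate) and the data in AL/AX/EAX, which is why the handler
 * masks and merges the IN result into pMixedCtx->eax with uAndVal.
 */
#if 0
static inline uint8_t guestInU8(uint16_t uPort)
{
    uint8_t bValue;
    __asm__ __volatile__("inb %1, %0" : "=a" (bValue) : "Nd" (uPort));
    return bValue;
}

static inline void guestOutU8(uint16_t uPort, uint8_t bValue)
{
    __asm__ __volatile__("outb %0, %1" : : "a" (bValue), "Nd" (uPort));
}
#endif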
10963
10964/**
10965 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
10966 * VM-exit.
10967 */
10968HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10969{
10970 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10971
10972 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
10973 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10974 AssertRCReturn(rc, rc);
10975 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
10976 {
10977 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
10978 AssertRCReturn(rc, rc);
10979 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
10980 {
10981 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
10982
10983 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
10984 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
10985
10986 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
10987 Assert(!pVCpu->hm.s.Event.fPending);
10988 pVCpu->hm.s.Event.fPending = true;
10989 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
10990 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
10991 AssertRCReturn(rc, rc);
10992 if (fErrorCodeValid)
10993 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
10994 else
10995 pVCpu->hm.s.Event.u32ErrCode = 0;
10996 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
10997 && uVector == X86_XCPT_PF)
10998 {
10999 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
11000 }
11001
11002 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
11003 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11004 return VINF_EM_RAW_INJECT_TRPM_EVENT;
11005 }
11006 }
11007
11008 /** @todo Emulate task switch someday, currently just going back to ring-3 for
11009 * emulation. */
11010 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11011 return VERR_EM_INTERPRETER;
11012}
11013
11014
11015/**
11016 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
11017 */
11018HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11019{
11020 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11021 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
11022 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
11023 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11024 AssertRCReturn(rc, rc);
11025 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
11026 return VINF_EM_DBG_STEPPED;
11027}
11028
11029
11030/**
11031 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
11032 */
11033HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11034{
11035 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11036
11037 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11038 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11039 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11040 return VINF_SUCCESS;
11041 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11042 return rc;
11043
11044#if 0
11045 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
11046 * just sync the whole thing. */
11047 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11048#else
11049 /* Aggressive state sync. for now. */
11050 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11051 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11052 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11053#endif
11054 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11055 AssertRCReturn(rc, rc);
11056
11057 /* See Intel spec. Table 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
11058 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
11059 switch (uAccessType)
11060 {
11061 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
11062 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
11063 {
11064 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
11065 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
11066 {
11067 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
11068 }
11069
11070 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
11071 GCPhys &= PAGE_BASE_GC_MASK;
11072 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
11073 PVM pVM = pVCpu->CTX_SUFF(pVM);
11074 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
11075 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
11076
11077 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
11078 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
11079 CPUMCTX2CORE(pMixedCtx), GCPhys);
11080 rc = VBOXSTRICTRC_VAL(rc2);
11081 Log4(("ApicAccess rc=%d\n", rc));
11082 if ( rc == VINF_SUCCESS
11083 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11084 || rc == VERR_PAGE_NOT_PRESENT)
11085 {
11086 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11087 | HM_CHANGED_GUEST_RSP
11088 | HM_CHANGED_GUEST_RFLAGS
11089 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11090 rc = VINF_SUCCESS;
11091 }
11092 break;
11093 }
11094
11095 default:
11096 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
11097 rc = VINF_EM_RAW_EMULATE_INSTR;
11098 break;
11099 }
11100
11101 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
11102 return rc;
11103}
11104
11105
11106/**
11107 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
11108 * VM-exit.
11109 */
11110HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11111{
11112 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11113
11114 /* We should -not- get this VM-exit if the guest's debug registers were active. */
11115 if (pVmxTransient->fWasGuestDebugStateActive)
11116 {
11117 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11118 HMVMX_RETURN_UNEXPECTED_EXIT();
11119 }
11120
11121 int rc = VERR_INTERNAL_ERROR_5;
11122 if ( !DBGFIsStepping(pVCpu)
11123 && !pVCpu->hm.s.fSingleInstruction
11124 && !pVmxTransient->fWasHyperDebugStateActive)
11125 {
11126 /* Don't intercept MOV DRx and #DB any more. */
11127 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
11128 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11129 AssertRCReturn(rc, rc);
11130
11131 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11132 {
11133#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11134 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
11135 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
11136 AssertRCReturn(rc, rc);
11137#endif
11138 }
11139
11140 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11141 VMMRZCallRing3Disable(pVCpu);
11142 HM_DISABLE_PREEMPT_IF_NEEDED();
11143
11144 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
11145 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
11146 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
11147
11148 HM_RESTORE_PREEMPT_IF_NEEDED();
11149 VMMRZCallRing3Enable(pVCpu);
11150
11151#ifdef VBOX_WITH_STATISTICS
11152 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11153 AssertRCReturn(rc, rc);
11154 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11155 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11156 else
11157 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11158#endif
11159 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
11160 return VINF_SUCCESS;
11161 }
11162
11163 /*
11164 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
11165 * Update the segment registers and DR7 from the CPU.
11166 */
11167 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11168 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11169 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11170 AssertRCReturn(rc, rc);
11171 Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11172
11173 PVM pVM = pVCpu->CTX_SUFF(pVM);
11174 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11175 {
11176 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11177 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
11178 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
11179 if (RT_SUCCESS(rc))
11180 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11181 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11182 }
11183 else
11184 {
11185 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11186 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
11187 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
11188 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11189 }
11190
11191 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
11192 if (RT_SUCCESS(rc))
11193 {
11194 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11195 AssertRCReturn(rc2, rc2);
11196 }
11197 return rc;
11198}
11199
11200
11201/**
11202 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11203 * Conditional VM-exit.
11204 */
11205HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11206{
11207 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11208 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11209
11210 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11211 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11212 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11213 return VINF_SUCCESS;
11214 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11215 return rc;
11216
11217 RTGCPHYS GCPhys = 0;
11218 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11219
11220#if 0
11221 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11222#else
11223 /* Aggressive state sync. for now. */
11224 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11225 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11226 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11227#endif
11228 AssertRCReturn(rc, rc);
11229
11230 /*
11231 * If we succeed, resume guest execution.
11232 * If we fail in interpreting the instruction because we couldn't get the guest physical address
11233 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
11234 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
11235 * weird case. See @bugref{6043}.
11236 */
11237 PVM pVM = pVCpu->CTX_SUFF(pVM);
11238 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
11239 rc = VBOXSTRICTRC_VAL(rc2);
11240 Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
11241 if ( rc == VINF_SUCCESS
11242 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11243 || rc == VERR_PAGE_NOT_PRESENT)
11244 {
11245 /* Successfully handled MMIO operation. */
11246 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11247 | HM_CHANGED_GUEST_RSP
11248 | HM_CHANGED_GUEST_RFLAGS
11249 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11250 rc = VINF_SUCCESS;
11251 }
11252 return rc;
11253}
11254
11255
11256/**
11257 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
11258 * VM-exit.
11259 */
11260HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11261{
11262 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11263 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11264
11265 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11266 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11267 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
11268 return VINF_SUCCESS;
11269 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
11270 return rc;
11271
11272 RTGCPHYS GCPhys = 0;
11273 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11274 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11275#if 0
11276 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11277#else
11278 /* Aggressive state sync. for now. */
11279 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11280 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11281 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11282#endif
11283 AssertRCReturn(rc, rc);
11284
11285 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
11286 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
11287
11288 RTGCUINT uErrorCode = 0;
11289 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
11290 uErrorCode |= X86_TRAP_PF_ID;
11291 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
11292 uErrorCode |= X86_TRAP_PF_RW;
11293 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
11294 uErrorCode |= X86_TRAP_PF_P;
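    /* The error code built above follows the #PF error-code bit layout (P, R/W, I/D) so that
       TRPM/PGM below can treat the EPT violation like an ordinary nested-paging page fault. */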
11295
11296 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
11297
11298 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
11299 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11300
11301 /* Handle the pagefault trap for the nested shadow table. */
11302 PVM pVM = pVCpu->CTX_SUFF(pVM);
11303 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
11304 TRPMResetTrap(pVCpu);
11305
11306 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
11307 if ( rc == VINF_SUCCESS
11308 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11309 || rc == VERR_PAGE_NOT_PRESENT)
11310 {
11311 /* Successfully synced our nested page tables. */
11312 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
11313 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11314 | HM_CHANGED_GUEST_RSP
11315 | HM_CHANGED_GUEST_RFLAGS);
11316 return VINF_SUCCESS;
11317 }
11318
11319 Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
11320 return rc;
11321}
11322
11323/** @} */
11324
11325/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11326/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
11327/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11328
11329/** @name VM-exit exception handlers.
11330 * @{
11331 */
11332
11333/**
11334 * VM-exit exception handler for #MF (Math Fault: floating point exception).
11335 */
11336static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11337{
11338 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11339 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
11340
11341 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11342 AssertRCReturn(rc, rc);
11343
11344 if (!(pMixedCtx->cr0 & X86_CR0_NE))
11345 {
11346 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
11347 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
11348
11349 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
11350 * provides VM-exit instruction length. If this causes problem later,
11351 * disassemble the instruction like it's done on AMD-V. */
11352 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11353 AssertRCReturn(rc2, rc2);
11354 return rc;
11355 }
11356
11357 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11358 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11359 return rc;
11360}
11361
11362
11363/**
11364 * VM-exit exception handler for #BP (Breakpoint exception).
11365 */
11366static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11367{
11368 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11369 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
11370
11371 /** @todo Try optimize this by not saving the entire guest state unless
11372 * really needed. */
11373 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11374 AssertRCReturn(rc, rc);
11375
11376 PVM pVM = pVCpu->CTX_SUFF(pVM);
11377 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11378 if (rc == VINF_EM_RAW_GUEST_TRAP)
11379 {
11380 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11381 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11382 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11383 AssertRCReturn(rc, rc);
11384
11385 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11386 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11387 }
11388
11389 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
11390 return rc;
11391}
11392
11393
11394/**
11395 * VM-exit exception handler for #DB (Debug exception).
11396 */
11397static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11398{
11399 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11400 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
11401 Log6(("XcptDB\n"));
11402
11403 /*
11404 * Get the DR6-like values from the exit qualification and pass them to DBGF
11405 * for processing.
11406 */
11407 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11408 AssertRCReturn(rc, rc);
11409
11410 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
11411 uint64_t uDR6 = X86_DR6_INIT_VAL;
11412 uDR6 |= ( pVmxTransient->uExitQualification
11413 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
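    /* The exit-qualification bit positions for B0-B3, BD and BS match the DR6 layout, which is
       why the masked value can be ORed straight into a DR6-style value here. */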
11414
11415 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
11416 if (rc == VINF_EM_RAW_GUEST_TRAP)
11417 {
11418 /*
11419 * The exception was for the guest. Update DR6, DR7.GD and
11420 * IA32_DEBUGCTL.LBR before forwarding it.
11421 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
11422 */
11423 VMMRZCallRing3Disable(pVCpu);
11424 HM_DISABLE_PREEMPT_IF_NEEDED();
11425
11426 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
11427 pMixedCtx->dr[6] |= uDR6;
11428 if (CPUMIsGuestDebugStateActive(pVCpu))
11429 ASMSetDR6(pMixedCtx->dr[6]);
11430
11431 HM_RESTORE_PREEMPT_IF_NEEDED();
11432 VMMRZCallRing3Enable(pVCpu);
11433
11434 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11435 AssertRCReturn(rc, rc);
11436
11437 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
11438 pMixedCtx->dr[7] &= ~X86_DR7_GD;
11439
11440 /* Paranoia. */
11441 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
11442 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
11443
11444 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
11445 AssertRCReturn(rc, rc);
11446
11447 /*
11448 * Raise #DB in the guest.
11449 *
11450 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
11451 * hmR0VmxSetPendingXcptDB(), as the #DB could have been raised by an ICEBP (INT1) rather than a 'normal' #DB.
11452 * Thus the CPU -may- handle the injection differently (e.g. skipped DPL checks). See @bugref{6398}.
11453 *
11454 * Since ICEBP isn't documented on Intel, see AMD spec. 15.20 "Event Injection".
11455 */
11456 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11457 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11458 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11459 AssertRCReturn(rc, rc);
11460 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11461 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11462 return VINF_SUCCESS;
11463 }
11464
11465 /*
11466 * Not a guest trap, must be a hypervisor related debug event then.
11467 * Update DR6 in case someone is interested in it.
11468 */
11469 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
11470 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
11471 CPUMSetHyperDR6(pVCpu, uDR6);
11472
11473 return rc;
11474}
11475
11476
11477/**
11478 * VM-exit exception handler for #NM (Device-not-available exception), raised
11479 * here as part of lazy loading of the guest FPU state.
11480 */
11481static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11482{
11483 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11484
11485 /* We require CR0 and EFER. EFER is always up-to-date. */
11486 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11487 AssertRCReturn(rc, rc);
11488
11489 /* We're playing with the host CPU state here, so we have to disable preemption and ring-3 longjmps. */
11490 VMMRZCallRing3Disable(pVCpu);
11491 HM_DISABLE_PREEMPT_IF_NEEDED();
11492
11493 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
11494 if (pVmxTransient->fWasGuestFPUStateActive)
11495 {
11496 rc = VINF_EM_RAW_GUEST_TRAP;
11497 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11498 }
11499 else
11500 {
11501#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11502 Assert(!pVmxTransient->fWasGuestFPUStateActive);
11503#endif
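        /* Lazily load the guest FPU state onto the host CPU; VINF_EM_RAW_GUEST_TRAP means the #NM
           must be handled by the guest itself. */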
11504 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11505 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
11506 }
11507
11508 HM_RESTORE_PREEMPT_IF_NEEDED();
11509 VMMRZCallRing3Enable(pVCpu);
11510
11511 if (rc == VINF_SUCCESS)
11512 {
11513 /* Guest FPU state was activated; we'll want to change the CR0 FPU intercepts before the next VM-reentry. */
11514 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11515 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
11516 pVCpu->hm.s.fUseGuestFpu = true;
11517 }
11518 else
11519 {
11520 /* Forward #NM to the guest. */
11521 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
11522 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11523 AssertRCReturn(rc, rc);
11524 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11525 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
11526 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11527 }
11528
11529 return VINF_SUCCESS;
11530}
11531
11532
11533/**
11534 * VM-exit exception handler for #GP (General-protection exception).
11535 *
11536 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
11537 */
11538static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11539{
11540 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11541 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
11542
11543 int rc = VERR_INTERNAL_ERROR_5;
11544 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11545 {
11546#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11547 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
11548 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11549 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11550 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11551 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11552 AssertRCReturn(rc, rc);
11553 Log4(("#GP Gst: CS:RIP %04x:%#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
11554 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
11555 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11556 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11557 return rc;
11558#else
11559 /* We don't intercept #GP. */
11560 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
11561 NOREF(pVmxTransient);
11562 return VERR_VMX_UNEXPECTED_EXCEPTION;
11563#endif
11564 }
11565
11566 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11567 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
11568
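    /*
     * The guest is running real-mode code in virtual-8086 mode (no unrestricted guest execution).
     * In V86 mode the IOPL-sensitive instructions (CLI, STI, PUSHF, POPF, INT n, IRET) can raise #GP,
     * which is how we end up here; they are emulated below.
     */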
11569 /* EMInterpretDisasCurrent() requires a lot of the state, so save the entire guest state. */
11570 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11571 AssertRCReturn(rc, rc);
11572
11573 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11574 uint32_t cbOp = 0;
11575 PVM pVM = pVCpu->CTX_SUFF(pVM);
11576 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
11577 if (RT_SUCCESS(rc))
11578 {
11579 rc = VINF_SUCCESS;
11580 Assert(cbOp == pDis->cbInstr);
11581 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11582 switch (pDis->pCurInstr->uOpcode)
11583 {
11584 case OP_CLI:
11585 {
11586 pMixedCtx->eflags.Bits.u1IF = 0;
11587 pMixedCtx->rip += pDis->cbInstr;
11588 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11589 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11590 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
11591 break;
11592 }
11593
11594 case OP_STI:
11595 {
11596 pMixedCtx->eflags.Bits.u1IF = 1;
11597 pMixedCtx->rip += pDis->cbInstr;
11598 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
11599 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
11600 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11601 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11602 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
11603 break;
11604 }
11605
11606 case OP_HLT:
11607 {
11608 rc = VINF_EM_HALT;
11609 pMixedCtx->rip += pDis->cbInstr;
11610 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11611 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11612 break;
11613 }
11614
11615 case OP_POPF:
11616 {
11617 Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11618 uint32_t cbParm;
11619 uint32_t uMask;
11620 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
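                /* An operand-size prefix in 16-bit code selects the 32-bit form (POPFD), i.e. 4 bytes are popped. */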
11621 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11622 {
11623 cbParm = 4;
11624 uMask = 0xffffffff;
11625 }
11626 else
11627 {
11628 cbParm = 2;
11629 uMask = 0xffff;
11630 }
11631
11632 /* Get the stack pointer & pop the EFLAGS image off the stack. */
11633 RTGCPTR GCPtrStack = 0;
11634 X86EFLAGS Eflags;
11635 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11636 &GCPtrStack);
11637 if (RT_SUCCESS(rc))
11638 {
11639 Assert(sizeof(Eflags.u32) >= cbParm);
11640 Eflags.u32 = 0;
11641 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm);
11642 }
11643 if (RT_FAILURE(rc))
11644 {
11645 rc = VERR_EM_INTERPRETER;
11646 break;
11647 }
11648 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
11649 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
11650 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
11651 pMixedCtx->eflags.Bits.u1RF = 0; /* The RF bit is always cleared by POPF; see Intel Instruction reference. */
11652 pMixedCtx->esp += cbParm;
11653 pMixedCtx->esp &= uMask;
11654 pMixedCtx->rip += pDis->cbInstr;
11655 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11656 | HM_CHANGED_GUEST_RSP
11657 | HM_CHANGED_GUEST_RFLAGS);
11658 /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */
11659 if (fStepping)
11660 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11661
11662 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
11663 break;
11664 }
11665
11666 case OP_PUSHF:
11667 {
11668 uint32_t cbParm;
11669 uint32_t uMask;
11670 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11671 {
11672 cbParm = 4;
11673 uMask = 0xffffffff;
11674 }
11675 else
11676 {
11677 cbParm = 2;
11678 uMask = 0xffff;
11679 }
11680
11681 /* Get the stack pointer & push the EFLAGS image onto the stack. */
11682 RTGCPTR GCPtrStack = 0;
11683 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
11684 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
11685 if (RT_FAILURE(rc))
11686 {
11687 rc = VERR_EM_INTERPRETER;
11688 break;
11689 }
11690 X86EFLAGS Eflags = pMixedCtx->eflags;
11691 /* The RF & VM bits are cleared in the EFLAGS image stored on the stack; see the Intel instruction reference for PUSHF. */
11692 Eflags.Bits.u1RF = 0;
11693 Eflags.Bits.u1VM = 0;
11694
11695 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm);
11696 if (RT_FAILURE(rc))
11697 {
11698 rc = VERR_EM_INTERPRETER;
11699 break;
11700 }
11701 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
11702 pMixedCtx->esp -= cbParm;
11703 pMixedCtx->esp &= uMask;
11704 pMixedCtx->rip += pDis->cbInstr;
11705 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP);
11706 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11707 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
11708 break;
11709 }
11710
11711 case OP_IRET:
11712 {
11713 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
11714 * instruction reference. */
11715 RTGCPTR GCPtrStack = 0;
11716 uint32_t uMask = 0xffff;
11717 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11718 uint16_t aIretFrame[3];
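                /* Real-mode IRET frame popped off the stack: aIretFrame[0]=IP, [1]=CS, [2]=FLAGS (16 bits each);
                   the new CS base is recomputed as selector << 4 below. */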
11719 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
11720 {
11721 rc = VERR_EM_INTERPRETER;
11722 break;
11723 }
11724 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11725 &GCPtrStack);
11726 if (RT_SUCCESS(rc))
11727 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
11728 if (RT_FAILURE(rc))
11729 {
11730 rc = VERR_EM_INTERPRETER;
11731 break;
11732 }
11733 pMixedCtx->eip = 0;
11734 pMixedCtx->ip = aIretFrame[0];
11735 pMixedCtx->cs.Sel = aIretFrame[1];
11736 pMixedCtx->cs.ValidSel = aIretFrame[1];
11737 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
11738 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
11739 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
11740 pMixedCtx->sp += sizeof(aIretFrame);
11741 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11742 | HM_CHANGED_GUEST_SEGMENT_REGS
11743 | HM_CHANGED_GUEST_RSP
11744 | HM_CHANGED_GUEST_RFLAGS);
11745 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
11746 if (fStepping)
11747 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11748 Log4(("IRET %#RX32 to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
11749 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
11750 break;
11751 }
11752
11753 case OP_INT:
11754 {
11755 uint16_t uVector = pDis->Param1.uValue & 0xff;
11756 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
11757 /* INT clears EFLAGS.TF, so we must not set any pending debug exceptions here. */
11758 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
11759 break;
11760 }
11761
11762 case OP_INTO:
11763 {
11764 if (pMixedCtx->eflags.Bits.u1OF)
11765 {
11766 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
11767 /* INTO clears EFLAGS.TF, so we must not set any pending debug exceptions here. */
11768 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
11769 }
11770 break;
11771 }
11772
11773 default:
11774 {
11775 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
11776 EMCODETYPE_SUPERVISOR);
11777 rc = VBOXSTRICTRC_VAL(rc2);
11778 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
11779 /** @todo When the guest is single-stepping we have to set pending-debug
11780 * exceptions here, depending on which instruction was interpreted. */
11781 Log4(("#GP rc=%Rrc\n", rc));
11782 break;
11783 }
11784 }
11785 }
11786 else
11787 rc = VERR_EM_INTERPRETER;
11788
11789 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
11790 ("#GP Unexpected rc=%Rrc\n", rc));
11791 return rc;
11792}
11793
11794
11795#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11796/**
11797 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
11798 * the exception reported in the VMX transient structure back into the VM.
11799 *
11800 * @remarks Requires uExitIntInfo in the VMX transient structure to be
11801 * up-to-date.
11802 */
11803static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11804{
11805 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11806
11807 /* Re-inject the exception into the guest. This cannot be a double-fault condition, as that would have been
11808 handled in hmR0VmxCheckExitDueToEventDelivery(). */
11809 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11810 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11811 AssertRCReturn(rc, rc);
11812 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
11813
11814#ifdef DEBUG_ramshankar
11815 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11816 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11817 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));
11818#endif
11819
11820 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11821 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11822 return VINF_SUCCESS;
11823}
11824#endif
11825
11826
11827/**
11828 * VM-exit exception handler for #PF (Page-fault exception).
11829 */
11830static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11831{
11832 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11833 PVM pVM = pVCpu->CTX_SUFF(pVM);
11834 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11835 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11836 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11837 AssertRCReturn(rc, rc);
11838
11839#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
11840 if (pVM->hm.s.fNestedPaging)
11841 {
11842 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
11843 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
11844 {
11845 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
11846 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11847 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
11848 }
11849 else
11850 {
11851 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
11852 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
11853 Log4(("Pending #DF due to vectoring #PF. NP\n"));
11854 }
11855 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
11856 return rc;
11857 }
11858#else
11859 Assert(!pVM->hm.s.fNestedPaging);
11860 NOREF(pVM);
11861#endif
11862
11863 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11864 AssertRCReturn(rc, rc);
11865
11866 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
11867 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
11868
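    /* Forward the #PF to PGM: VINF_SUCCESS means it synced the shadow page tables or emulated an MMIO access,
       VINF_EM_RAW_GUEST_TRAP means the fault is for the guest and is reflected back below. */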
11869 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
11870 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
11871 (RTGCPTR)pVmxTransient->uExitQualification);
11872
11873 Log4(("#PF: rc=%Rrc\n", rc));
11874 if (rc == VINF_SUCCESS)
11875 {
11876 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
11877 /** @todo This isn't quite right: what if the guest does LGDT with MMIO
11878 * memory? We don't update the whole state here... */
11879 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11880 | HM_CHANGED_GUEST_RSP
11881 | HM_CHANGED_GUEST_RFLAGS
11882 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11883 TRPMResetTrap(pVCpu);
11884 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
11885 return rc;
11886 }
11887 else if (rc == VINF_EM_RAW_GUEST_TRAP)
11888 {
11889 if (!pVmxTransient->fVectoringPF)
11890 {
11891 /* It's a guest page fault and needs to be reflected to the guest. */
11892 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
11893 TRPMResetTrap(pVCpu);
11894 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
11895 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
11896 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11897 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
11898 }
11899 else
11900 {
11901 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
11902 TRPMResetTrap(pVCpu);
11903 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
11904 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
11905 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
11906 }
11907
11908 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
11909 return VINF_SUCCESS;
11910 }
11911
11912 TRPMResetTrap(pVCpu);
11913 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
11914 return rc;
11915}
11916
11917/** @} */
11918