VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@58912

Last change on this file since 58912 was 58912, checked in by vboxsync, on Nov 29, 2015

HMGLOBALCPUINFO: Cache the RTR0MemObjGetPagePhysAddr and RTR0MemObjAddress results as they aren't necessarily all that fast.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 510.2 KB
1/* $Id: HMVMXR0.cpp 58912 2015-11-29 20:08:14Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#include <iprt/x86.h>
24#include <iprt/asm-amd64-x86.h>
25#include <iprt/thread.h>
26
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/dbgf.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/selm.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/gim.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include "HMInternal.h"
38#include <VBox/vmm/vm.h>
39#include "HMVMXR0.h"
40#include "dtrace/VBoxVMM.h"
41
42#ifdef DEBUG_ramshankar
43# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
44# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
45# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
46# define HMVMX_ALWAYS_CHECK_GUEST_STATE
47# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
48# define HMVMX_ALWAYS_TRAP_PF
49# define HMVMX_ALWAYS_SWAP_FPU_STATE
50# define HMVMX_ALWAYS_FLUSH_TLB
51# define HMVMX_ALWAYS_SWAP_EFER
52#endif
53
54
55/*********************************************************************************************************************************
56* Defined Constants And Macros *
57*********************************************************************************************************************************/
58/** Use the function table. */
59#define HMVMX_USE_FUNCTION_TABLE
60
61/** Determine which tagged-TLB flush handler to use. */
62#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
63#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
64#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
65#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
66
67/** @name Updated-guest-state flags.
68 * @{ */
69#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
70#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
71#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
72#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
73#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
74#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
75#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
76#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
77#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
78#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
79#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
80#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
81#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12)
82#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13)
83#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14)
84#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15)
85#define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16)
86#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17)
87#define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18)
88#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
89#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
90 | HMVMX_UPDATED_GUEST_RSP \
91 | HMVMX_UPDATED_GUEST_RFLAGS \
92 | HMVMX_UPDATED_GUEST_CR0 \
93 | HMVMX_UPDATED_GUEST_CR3 \
94 | HMVMX_UPDATED_GUEST_CR4 \
95 | HMVMX_UPDATED_GUEST_GDTR \
96 | HMVMX_UPDATED_GUEST_IDTR \
97 | HMVMX_UPDATED_GUEST_LDTR \
98 | HMVMX_UPDATED_GUEST_TR \
99 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
100 | HMVMX_UPDATED_GUEST_DEBUG \
101 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
102 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
103 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
104 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
105 | HMVMX_UPDATED_GUEST_LAZY_MSRS \
106 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
107 | HMVMX_UPDATED_GUEST_INTR_STATE \
108 | HMVMX_UPDATED_GUEST_APIC_STATE)
109/** @} */
110
111/** @name
112 * Flags to skip redundant reads of some common VMCS fields that are not part of
113 * the guest-CPU state but are in the transient structure.
114 * @{ */
115#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
116#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
117#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
118#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
119#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
120#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
121#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
122/** @} */
123
124/** @name
125 * States of the VMCS.
126 *
127 * This does not reflect all possible VMCS states but currently only those
128 * needed for maintaining the VMCS consistently even when thread-context hooks
129 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
130 * @{ */
131#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
132#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
133#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
134/** @} */
135
136/**
137 * Exception bitmap mask for real-mode guests (real-on-v86).
138 *
139 * We need to intercept all exceptions manually except:
140 * - \#NM, \#MF handled in hmR0VmxLoadSharedCR0().
141 * - \#DB handled in hmR0VmxLoadSharedDebugState().
142 * - \#PF need not be intercepted even in real-mode if we have Nested Paging
143 * support.
144 */
145#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
146 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
147 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
148 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
149 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
150 /* RT_BIT(X86_XCPT_MF) always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
151 | RT_BIT(X86_XCPT_XF))
152
153/**
154 * Exception bitmap mask for all contributory exceptions.
155 *
156 * Page fault is deliberately excluded here as it's conditional as to whether
157 * it's contributory or benign. Page faults are handled separately.
158 */
159#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
160 | RT_BIT(X86_XCPT_DE))
161
162/** Maximum VM-instruction error number. */
163#define HMVMX_INSTR_ERROR_MAX 28
164
165/** Profiling macro. */
166#ifdef HM_PROFILE_EXIT_DISPATCH
167# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
168# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
169#else
170# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
171# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
172#endif
173
174/** Assert that preemption is disabled or covered by thread-context hooks. */
175#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
176 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
177
178/** Assert that we haven't migrated CPUs when thread-context hooks are not
179 * used. */
180#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
181 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
182 ("Illegal migration! Entered on CPU %u Current %u\n", \
183 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
184
185/** Helper macro for VM-exit handlers called unexpectedly. */
186#define HMVMX_RETURN_UNEXPECTED_EXIT() \
187 do { \
188 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
189 return VERR_VMX_UNEXPECTED_EXIT; \
190 } while (0)
191
192
193/*********************************************************************************************************************************
194* Structures and Typedefs *
195*********************************************************************************************************************************/
196/**
197 * VMX transient state.
198 *
199 * A state structure for holding miscellaneous information across
200 * VMX non-root operation and restored after the transition.
201 */
202typedef struct VMXTRANSIENT
203{
204 /** The host's rflags/eflags. */
205 RTCCUINTREG fEFlags;
206#if HC_ARCH_BITS == 32
207 uint32_t u32Alignment0;
208#endif
209 /** The guest's TPR value used for TPR shadowing. */
210 uint8_t u8GuestTpr;
211 /** Alignment. */
212 uint8_t abAlignment0[7];
213
214 /** The basic VM-exit reason. */
215 uint16_t uExitReason;
216 /** Alignment. */
217 uint16_t u16Alignment0;
218 /** The VM-exit interruption error code. */
219 uint32_t uExitIntErrorCode;
220 /** The VM-exit exit code qualification. */
221 uint64_t uExitQualification;
222
223 /** The VM-exit interruption-information field. */
224 uint32_t uExitIntInfo;
225 /** The VM-exit instruction-length field. */
226 uint32_t cbInstr;
227 /** The VM-exit instruction-information field. */
228 union
229 {
230 /** Plain unsigned int representation. */
231 uint32_t u;
232 /** INS and OUTS information. */
233 struct
234 {
235 uint32_t u6Reserved0 : 7;
236 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
237 uint32_t u3AddrSize : 3;
238 uint32_t u5Reserved1 : 5;
239 /** The segment register (X86_SREG_XXX). */
240 uint32_t iSegReg : 3;
241 uint32_t uReserved2 : 14;
242 } StrIo;
243 } ExitInstrInfo;
244 /** Whether the VM-entry failed or not. */
245 bool fVMEntryFailed;
246 /** Alignment. */
247 uint8_t abAlignment1[3];
248
249 /** The VM-entry interruption-information field. */
250 uint32_t uEntryIntInfo;
251 /** The VM-entry exception error code field. */
252 uint32_t uEntryXcptErrorCode;
253 /** The VM-entry instruction length field. */
254 uint32_t cbEntryInstr;
255
256 /** IDT-vectoring information field. */
257 uint32_t uIdtVectoringInfo;
258 /** IDT-vectoring error code. */
259 uint32_t uIdtVectoringErrorCode;
260
261 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
262 uint32_t fVmcsFieldsRead;
263
264 /** Whether the guest FPU was active at the time of VM-exit. */
265 bool fWasGuestFPUStateActive;
266 /** Whether the guest debug state was active at the time of VM-exit. */
267 bool fWasGuestDebugStateActive;
268 /** Whether the hyper debug state was active at the time of VM-exit. */
269 bool fWasHyperDebugStateActive;
270 /** Whether TSC-offsetting should be setup before VM-entry. */
271 bool fUpdateTscOffsettingAndPreemptTimer;
272 /** Whether the VM-exit was caused by a page-fault during delivery of a
273 * contributory exception or a page-fault. */
274 bool fVectoringDoublePF;
275 /** Whether the VM-exit was caused by a page-fault during delivery of an
276 * external interrupt or NMI. */
277 bool fVectoringPF;
278} VMXTRANSIENT;
279AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
280AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
281AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
282AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
283AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
284/** Pointer to VMX transient state. */
285typedef VMXTRANSIENT *PVMXTRANSIENT;
286
287
288/**
289 * MSR-bitmap read permissions.
290 */
291typedef enum VMXMSREXITREAD
292{
293 /** Reading this MSR causes a VM-exit. */
294 VMXMSREXIT_INTERCEPT_READ = 0xb,
295 /** Reading this MSR does not cause a VM-exit. */
296 VMXMSREXIT_PASSTHRU_READ
297} VMXMSREXITREAD;
298/** Pointer to MSR-bitmap read permissions. */
299typedef VMXMSREXITREAD* PVMXMSREXITREAD;
300
301/**
302 * MSR-bitmap write permissions.
303 */
304typedef enum VMXMSREXITWRITE
305{
306 /** Writing to this MSR causes a VM-exit. */
307 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
308 /** Writing to this MSR does not cause a VM-exit. */
309 VMXMSREXIT_PASSTHRU_WRITE
310} VMXMSREXITWRITE;
311/** Pointer to MSR-bitmap write permissions. */
312typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
313
314
315/**
316 * VMX VM-exit handler.
317 *
318 * @returns VBox status code.
319 * @param pVCpu The cross context virtual CPU structure.
320 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
321 * out-of-sync. Make sure to update the required
322 * fields before using them.
323 * @param pVmxTransient Pointer to the VMX-transient structure.
324 */
325#ifndef HMVMX_USE_FUNCTION_TABLE
326typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
327#else
328typedef DECLCALLBACK(int) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
329/** Pointer to VM-exit handler. */
330typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
331#endif
332
333
334/*********************************************************************************************************************************
335* Internal Functions *
336*********************************************************************************************************************************/
337static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
338static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
339static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
340static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
341 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
342 bool fStepping, uint32_t *puIntState);
343#if HC_ARCH_BITS == 32
344static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
345#endif
346#ifndef HMVMX_USE_FUNCTION_TABLE
347DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
348# define HMVMX_EXIT_DECL static int
349#else
350# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
351#endif
352DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
353 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart);
354
355/** @name VM-exit handlers.
356 * @{
357 */
358static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
359static FNVMXEXITHANDLER hmR0VmxExitExtInt;
360static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
361static FNVMXEXITHANDLER hmR0VmxExitInitSignal;
362static FNVMXEXITHANDLER hmR0VmxExitSipi;
363static FNVMXEXITHANDLER hmR0VmxExitIoSmi;
364static FNVMXEXITHANDLER hmR0VmxExitSmi;
365static FNVMXEXITHANDLER hmR0VmxExitIntWindow;
366static FNVMXEXITHANDLER hmR0VmxExitNmiWindow;
367static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
368static FNVMXEXITHANDLER hmR0VmxExitCpuid;
369static FNVMXEXITHANDLER hmR0VmxExitGetsec;
370static FNVMXEXITHANDLER hmR0VmxExitHlt;
371static FNVMXEXITHANDLER hmR0VmxExitInvd;
372static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
373static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
374static FNVMXEXITHANDLER hmR0VmxExitVmcall;
375static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
376static FNVMXEXITHANDLER hmR0VmxExitRsm;
377static FNVMXEXITHANDLER hmR0VmxExitSetPendingXcptUD;
378static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
379static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
380static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
381static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
382static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
383static FNVMXEXITHANDLER hmR0VmxExitErrInvalidGuestState;
384static FNVMXEXITHANDLER hmR0VmxExitErrMsrLoad;
385static FNVMXEXITHANDLER hmR0VmxExitErrUndefined;
386static FNVMXEXITHANDLER hmR0VmxExitMwait;
387static FNVMXEXITHANDLER hmR0VmxExitMtf;
388static FNVMXEXITHANDLER hmR0VmxExitMonitor;
389static FNVMXEXITHANDLER hmR0VmxExitPause;
390static FNVMXEXITHANDLER hmR0VmxExitErrMachineCheck;
391static FNVMXEXITHANDLER hmR0VmxExitTprBelowThreshold;
392static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
393static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
394static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
395static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
396static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
397static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
398static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
399static FNVMXEXITHANDLER hmR0VmxExitWbinvd;
400static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
401static FNVMXEXITHANDLER hmR0VmxExitRdrand;
402static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
403/** @} */
404
405static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
406static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
407static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
408static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
409static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
410static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
411static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
412#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
413static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
414#endif
415static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
416
417
418/*********************************************************************************************************************************
419* Global Variables *
420*********************************************************************************************************************************/
421#ifdef HMVMX_USE_FUNCTION_TABLE
422
423/**
424 * VMX_EXIT dispatch table.
425 */
426static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
427{
428 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
429 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
430 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
431 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
432 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
433 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
434 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
435 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
436 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
437 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
438 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
439 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
440 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
441 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
442 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
443 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
444 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
445 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
446 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
447 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
448 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
449 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
450 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
451 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
452 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
453 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
454 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
455 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
456 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
457 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
458 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
459 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
460 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
461 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
462 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
463 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
464 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
465 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
466 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
467 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
468 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
469 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
470 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
471 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
472 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
473 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
474 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
475 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
476 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
477 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
478 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
479 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
480 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
481 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
482 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
483 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
484 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
485 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
486 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
487 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
488 /* 60 VMX_EXIT_RESERVED_60 */ hmR0VmxExitErrUndefined,
489 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
490 /* 62 VMX_EXIT_RESERVED_62 */ hmR0VmxExitErrUndefined,
491 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
492 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
493};
494#endif /* HMVMX_USE_FUNCTION_TABLE */
495
496#ifdef VBOX_STRICT
497static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
498{
499 /* 0 */ "(Not Used)",
500 /* 1 */ "VMCALL executed in VMX root operation.",
501 /* 2 */ "VMCLEAR with invalid physical address.",
502 /* 3 */ "VMCLEAR with VMXON pointer.",
503 /* 4 */ "VMLAUNCH with non-clear VMCS.",
504 /* 5 */ "VMRESUME with non-launched VMCS.",
505 /* 6 */ "VMRESUME after VMXOFF.",
506 /* 7 */ "VM-entry with invalid control fields.",
507 /* 8 */ "VM-entry with invalid host state fields.",
508 /* 9 */ "VMPTRLD with invalid physical address.",
509 /* 10 */ "VMPTRLD with VMXON pointer.",
510 /* 11 */ "VMPTRLD with incorrect revision identifier.",
511 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
512 /* 13 */ "VMWRITE to read-only VMCS component.",
513 /* 14 */ "(Not Used)",
514 /* 15 */ "VMXON executed in VMX root operation.",
515 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
516 /* 17 */ "VM-entry with non-launched executive VMCS.",
517 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
518 /* 19 */ "VMCALL with non-clear VMCS.",
519 /* 20 */ "VMCALL with invalid VM-exit control fields.",
520 /* 21 */ "(Not Used)",
521 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
522 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
523 /* 24 */ "VMCALL with invalid SMM-monitor features.",
524 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
525 /* 26 */ "VM-entry with events blocked by MOV SS.",
526 /* 27 */ "(Not Used)",
527 /* 28 */ "Invalid operand to INVEPT/INVVPID."
528};
529#endif /* VBOX_STRICT */
530
531
532
533/**
534 * Updates the VM's last error record.
535 *
536 * If there was a VMX instruction error, reads the error data from the VMCS and
537 * updates the VCPU's last error record as well.
538 *
539 * @param pVM The cross context VM structure.
540 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
541 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
542 * VERR_VMX_INVALID_VMCS_FIELD.
543 * @param rc The error code.
544 */
545static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
546{
547 AssertPtr(pVM);
548 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
549 || rc == VERR_VMX_UNABLE_TO_START_VM)
550 {
551 AssertPtrReturnVoid(pVCpu);
552 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
553 }
554 pVM->hm.s.lLastError = rc;
555}
556
557
558/**
559 * Reads the VM-entry interruption-information field from the VMCS into the VMX
560 * transient structure.
561 *
562 * @returns VBox status code.
563 * @param pVmxTransient Pointer to the VMX transient structure.
564 *
565 * @remarks No-long-jump zone!!!
566 */
567DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
568{
569 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
570 AssertRCReturn(rc, rc);
571 return VINF_SUCCESS;
572}
573
574
575/**
576 * Reads the VM-entry exception error code field from the VMCS into
577 * the VMX transient structure.
578 *
579 * @returns VBox status code.
580 * @param pVmxTransient Pointer to the VMX transient structure.
581 *
582 * @remarks No-long-jump zone!!!
583 */
584DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
585{
586 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
587 AssertRCReturn(rc, rc);
588 return VINF_SUCCESS;
589}
590
591
592/**
593 * Reads the VM-entry instruction length field from the VMCS into
594 * the VMX transient structure.
595 *
596 * @returns VBox status code.
597 * @param pVmxTransient Pointer to the VMX transient structure.
598 *
599 * @remarks No-long-jump zone!!!
600 */
601DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
602{
603 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
604 AssertRCReturn(rc, rc);
605 return VINF_SUCCESS;
606}
607
608
609/**
610 * Reads the VM-exit interruption-information field from the VMCS into the VMX
611 * transient structure.
612 *
613 * @returns VBox status code.
614 * @param pVmxTransient Pointer to the VMX transient structure.
615 */
616DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
617{
618 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
619 {
620 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
621 AssertRCReturn(rc, rc);
622 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
623 }
624 return VINF_SUCCESS;
625}
626
627
628/**
629 * Reads the VM-exit interruption error code from the VMCS into the VMX
630 * transient structure.
631 *
632 * @returns VBox status code.
633 * @param pVmxTransient Pointer to the VMX transient structure.
634 */
635DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
636{
637 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
638 {
639 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
640 AssertRCReturn(rc, rc);
641 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
642 }
643 return VINF_SUCCESS;
644}
645
646
647/**
648 * Reads the VM-exit instruction length field from the VMCS into the VMX
649 * transient structure.
650 *
651 * @returns VBox status code.
652 * @param pVmxTransient Pointer to the VMX transient structure.
653 */
654DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
655{
656 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
657 {
658 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
659 AssertRCReturn(rc, rc);
660 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
661 }
662 return VINF_SUCCESS;
663}
664
665
666/**
667 * Reads the VM-exit instruction-information field from the VMCS into
668 * the VMX transient structure.
669 *
670 * @returns VBox status code.
671 * @param pVmxTransient Pointer to the VMX transient structure.
672 */
673DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
674{
675 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
676 {
677 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
678 AssertRCReturn(rc, rc);
679 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
680 }
681 return VINF_SUCCESS;
682}
683
684
685/**
686 * Reads the exit code qualification from the VMCS into the VMX transient
687 * structure.
688 *
689 * @returns VBox status code.
690 * @param pVCpu The cross context virtual CPU structure of the
691 * calling EMT. (Required for the VMCS cache case.)
692 * @param pVmxTransient Pointer to the VMX transient structure.
693 */
694DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
695{
696 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
697 {
698 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
699 AssertRCReturn(rc, rc);
700 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
701 }
702 return VINF_SUCCESS;
703}
704
705
706/**
707 * Reads the IDT-vectoring information field from the VMCS into the VMX
708 * transient structure.
709 *
710 * @returns VBox status code.
711 * @param pVmxTransient Pointer to the VMX transient structure.
712 *
713 * @remarks No-long-jump zone!!!
714 */
715DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
716{
717 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
718 {
719 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
720 AssertRCReturn(rc, rc);
721 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
722 }
723 return VINF_SUCCESS;
724}
725
726
727/**
728 * Reads the IDT-vectoring error code from the VMCS into the VMX
729 * transient structure.
730 *
731 * @returns VBox status code.
732 * @param pVmxTransient Pointer to the VMX transient structure.
733 */
734DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
735{
736 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
737 {
738 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
739 AssertRCReturn(rc, rc);
740 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
741 }
742 return VINF_SUCCESS;
743}
744
745
746/**
747 * Enters VMX root mode operation on the current CPU.
748 *
749 * @returns VBox status code.
750 * @param pVM The cross context VM structure. Can be
751 * NULL, after a resume.
752 * @param HCPhysCpuPage Physical address of the VMXON region.
753 * @param pvCpuPage Pointer to the VMXON region.
754 */
755static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
756{
757 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
758 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
759 Assert(pvCpuPage);
760 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
761
762 if (pVM)
763 {
764 /* Write the VMCS revision dword to the VMXON region. */
765 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
766 }
767
768 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
769 RTCCUINTREG fEFlags = ASMIntDisableFlags();
770
771 /* Enable the VMX bit in CR4 if necessary. */
772 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
773
774 /* Enter VMX root mode. */
775 int rc = VMXEnable(HCPhysCpuPage);
776 if (RT_FAILURE(rc))
777 {
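 /* VMXON failed: clear CR4.VMXE again, but only if it was not already set before we enabled it above. */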
778 if (!(uOldCr4 & X86_CR4_VMXE))
779 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
780
781 if (pVM)
782 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
783 }
784
785 /* Restore interrupts. */
786 ASMSetFlags(fEFlags);
787 return rc;
788}
789
790
791/**
792 * Exits VMX root mode operation on the current CPU.
793 *
794 * @returns VBox status code.
795 */
796static int hmR0VmxLeaveRootMode(void)
797{
798 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
799
800 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
801 RTCCUINTREG fEFlags = ASMIntDisableFlags();
802
803 /* If we're for some reason not in VMX root mode, then don't leave it. */
804 RTCCUINTREG uHostCR4 = ASMGetCR4();
805
806 int rc;
807 if (uHostCR4 & X86_CR4_VMXE)
808 {
809 /* Exit VMX root mode and clear the VMX bit in CR4. */
810 VMXDisable();
811 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
812 rc = VINF_SUCCESS;
813 }
814 else
815 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
816
817 /* Restore interrupts. */
818 ASMSetFlags(fEFlags);
819 return rc;
820}
821
822
823/**
824 * Allocates and maps one physically contiguous page. The allocated page is
825 * zeroed out. (Used by various VT-x structures).
826 *
827 * @returns IPRT status code.
828 * @param pMemObj Pointer to the ring-0 memory object.
829 * @param ppVirt Where to store the virtual address of the
830 * allocation.
831 * @param pHCPhys Where to store the physical address of the
832 * allocation.
833 */
834DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
835{
836 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
837 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
838 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
839
840 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
841 if (RT_FAILURE(rc))
842 return rc;
843 *ppVirt = RTR0MemObjAddress(*pMemObj);
844 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
845 ASMMemZero32(*ppVirt, PAGE_SIZE);
846 return VINF_SUCCESS;
847}
848
849
850/**
851 * Frees and unmaps an allocated physical page.
852 *
853 * @param pMemObj Pointer to the ring-0 memory object.
854 * @param ppVirt Where to re-initialize the virtual address of
855 * allocation as 0.
856 * @param pHCPhys Where to re-initialize the physical address of the
857 * allocation as 0.
858 */
859DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
860{
861 AssertPtr(pMemObj);
862 AssertPtr(ppVirt);
863 AssertPtr(pHCPhys);
864 if (*pMemObj != NIL_RTR0MEMOBJ)
865 {
866 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
867 AssertRC(rc);
868 *pMemObj = NIL_RTR0MEMOBJ;
869 *ppVirt = 0;
870 *pHCPhys = 0;
871 }
872}
873
874
875/**
876 * Worker function to free VT-x related structures.
877 *
878 * @returns IPRT status code.
879 * @param pVM The cross context VM structure.
880 */
881static void hmR0VmxStructsFree(PVM pVM)
882{
883 for (VMCPUID i = 0; i < pVM->cCpus; i++)
884 {
885 PVMCPU pVCpu = &pVM->aCpus[i];
886 AssertPtr(pVCpu);
887
888 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
889 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
890
891 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
892 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
893
894 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
895 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
896 }
897
898 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
899#ifdef VBOX_WITH_CRASHDUMP_MAGIC
900 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
901#endif
902}
903
904
905/**
906 * Worker function to allocate VT-x related VM structures.
907 *
908 * @returns IPRT status code.
909 * @param pVM The cross context VM structure.
910 */
911static int hmR0VmxStructsAlloc(PVM pVM)
912{
913 /*
914 * Initialize members up-front so we can cleanup properly on allocation failure.
915 */
916#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
917 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
918 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
919 pVM->hm.s.vmx.HCPhys##a_Name = 0;
920
921#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
922 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
923 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
924 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
925
926#ifdef VBOX_WITH_CRASHDUMP_MAGIC
927 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
928#endif
929 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
930
931 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
932 for (VMCPUID i = 0; i < pVM->cCpus; i++)
933 {
934 PVMCPU pVCpu = &pVM->aCpus[i];
935 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
936 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
937 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
938 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
939 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
940 }
941#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
942#undef VMXLOCAL_INIT_VM_MEMOBJ
943
944 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
945 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
946 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
947 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
948
949 /*
950 * Allocate all the VT-x structures.
951 */
952 int rc = VINF_SUCCESS;
953#ifdef VBOX_WITH_CRASHDUMP_MAGIC
954 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
955 if (RT_FAILURE(rc))
956 goto cleanup;
957 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
958 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
959#endif
960
961 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
962 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
963 {
964 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
965 &pVM->hm.s.vmx.HCPhysApicAccess);
966 if (RT_FAILURE(rc))
967 goto cleanup;
968 }
969
970 /*
971 * Initialize per-VCPU VT-x structures.
972 */
973 for (VMCPUID i = 0; i < pVM->cCpus; i++)
974 {
975 PVMCPU pVCpu = &pVM->aCpus[i];
976 AssertPtr(pVCpu);
977
978 /* Allocate the VM control structure (VMCS). */
979 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
980 if (RT_FAILURE(rc))
981 goto cleanup;
982
983 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
984 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
985 {
986 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
987 &pVCpu->hm.s.vmx.HCPhysVirtApic);
988 if (RT_FAILURE(rc))
989 goto cleanup;
990 }
991
992 /*
993 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
994 * transparent accesses of specific MSRs.
995 *
996 * If the condition for enabling MSR bitmaps changes here, don't forget to
997 * update HMAreMsrBitmapsAvailable().
998 */
999 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1000 {
1001 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1002 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1003 if (RT_FAILURE(rc))
1004 goto cleanup;
1005 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1006 }
1007
1008 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1009 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1010 if (RT_FAILURE(rc))
1011 goto cleanup;
1012
1013 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1014 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1015 if (RT_FAILURE(rc))
1016 goto cleanup;
1017 }
1018
1019 return VINF_SUCCESS;
1020
1021cleanup:
1022 hmR0VmxStructsFree(pVM);
1023 return rc;
1024}
1025
1026
1027/**
1028 * Does global VT-x initialization (called during module initialization).
1029 *
1030 * @returns VBox status code.
1031 */
1032VMMR0DECL(int) VMXR0GlobalInit(void)
1033{
1034#ifdef HMVMX_USE_FUNCTION_TABLE
1035 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1036# ifdef VBOX_STRICT
1037 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1038 Assert(g_apfnVMExitHandlers[i]);
1039# endif
1040#endif
1041 return VINF_SUCCESS;
1042}
1043
1044
1045/**
1046 * Does global VT-x termination (called during module termination).
1047 */
1048VMMR0DECL(void) VMXR0GlobalTerm()
1049{
1050 /* Nothing to do currently. */
1051}
1052
1053
1054/**
1055 * Sets up and activates VT-x on the current CPU.
1056 *
1057 * @returns VBox status code.
1058 * @param pCpu Pointer to the global CPU info struct.
1059 * @param pVM The cross context VM structure. Can be
1060 * NULL after a host resume operation.
1061 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1062 * fEnabledByHost is @c true).
1063 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1064 * @a fEnabledByHost is @c true).
1065 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1066 * enable VT-x on the host.
1067 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1068 */
1069VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1070 void *pvMsrs)
1071{
1072 Assert(pCpu);
1073 Assert(pvMsrs);
1074 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1075
1076 /* Enable VT-x if it's not already enabled by the host. */
1077 if (!fEnabledByHost)
1078 {
1079 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1080 if (RT_FAILURE(rc))
1081 return rc;
1082 }
1083
1084 /*
1085 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
1086 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
1087 */
1088 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1089 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1090 {
1091 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1092 pCpu->fFlushAsidBeforeUse = false;
1093 }
1094 else
1095 pCpu->fFlushAsidBeforeUse = true;
1096
1097 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1098 ++pCpu->cTlbFlushes;
1099
1100 return VINF_SUCCESS;
1101}
1102
1103
1104/**
1105 * Deactivates VT-x on the current CPU.
1106 *
1107 * @returns VBox status code.
1108 * @param pCpu Pointer to the global CPU info struct.
1109 * @param pvCpuPage Pointer to the VMXON region.
1110 * @param HCPhysCpuPage Physical address of the VMXON region.
1111 *
1112 * @remarks This function should never be called when SUPR0EnableVTx() or
1113 * similar was used to enable VT-x on the host.
1114 */
1115VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1116{
1117 NOREF(pCpu);
1118 NOREF(pvCpuPage);
1119 NOREF(HCPhysCpuPage);
1120
1121 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1122 return hmR0VmxLeaveRootMode();
1123}
1124
1125
1126/**
1127 * Sets the permission bits for the specified MSR in the MSR bitmap.
1128 *
1129 * @param pVCpu The cross context virtual CPU structure.
1130 * @param uMsr The MSR value.
1131 * @param enmRead Whether reading this MSR causes a VM-exit.
1132 * @param enmWrite Whether writing this MSR causes a VM-exit.
1133 */
1134static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1135{
1136 int32_t iBit;
1137 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1138
1139 /*
1140 * Layout:
1141 * 0x000 - 0x3ff - Low MSR read bits
1142 * 0x400 - 0x7ff - High MSR read bits
1143 * 0x800 - 0xbff - Low MSR write bits
1144 * 0xc00 - 0xfff - High MSR write bits
1145 */
1146 if (uMsr <= 0x00001FFF)
1147 iBit = uMsr;
1148 else if ( uMsr >= 0xC0000000
1149 && uMsr <= 0xC0001FFF)
1150 {
1151 iBit = (uMsr - 0xC0000000);
1152 pbMsrBitmap += 0x400;
1153 }
1154 else
1155 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1156
1157 Assert(iBit <= 0x1fff);
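 /* The read-intercept bit is in the lower half of the bitmap; the matching write-intercept bit lives 0x800 bytes further in (see the layout above). */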
1158 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1159 ASMBitSet(pbMsrBitmap, iBit);
1160 else
1161 ASMBitClear(pbMsrBitmap, iBit);
1162
1163 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1164 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1165 else
1166 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1167}
1168
1169
1170#ifdef VBOX_STRICT
1171/**
1172 * Gets the permission bits for the specified MSR in the MSR bitmap.
1173 *
1174 * @returns VBox status code.
1175 * @retval VINF_SUCCESS if the specified MSR is found.
1176 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1177 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1178 *
1179 * @param pVCpu The cross context virtual CPU structure.
1180 * @param uMsr The MSR.
1181 * @param penmRead Where to store the read permissions.
1182 * @param penmWrite Where to store the write permissions.
1183 */
1184static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1185{
1186 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1187 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1188 int32_t iBit;
1189 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1190
1191 /* See hmR0VmxSetMsrPermission() for the layout. */
1192 if (uMsr <= 0x00001FFF)
1193 iBit = uMsr;
1194 else if ( uMsr >= 0xC0000000
1195 && uMsr <= 0xC0001FFF)
1196 {
1197 iBit = (uMsr - 0xC0000000);
1198 pbMsrBitmap += 0x400;
1199 }
1200 else
1201 AssertMsgFailedReturn(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr), VERR_NOT_SUPPORTED);
1202
1203 Assert(iBit <= 0x1fff);
1204 if (ASMBitTest(pbMsrBitmap, iBit))
1205 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1206 else
1207 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1208
1209 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1210 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1211 else
1212 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1213 return VINF_SUCCESS;
1214}
1215#endif /* VBOX_STRICT */
1216
1217
1218/**
1219 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1220 * area.
1221 *
1222 * @returns VBox status code.
1223 * @param pVCpu The cross context virtual CPU structure.
1224 * @param cMsrs The number of MSRs.
1225 */
1226DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1227{
1228 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1229 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1230 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1231 {
1232 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1233 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1234 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1235 }
1236
1237 /* Update number of guest MSRs to load/store across the world-switch. */
1238 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1239 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRCReturn(rc, rc);
1240
1241 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1242 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1243
1244 /* Update the VCPU's copy of the MSR count. */
1245 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1246
1247 return VINF_SUCCESS;
1248}
1249
1250
1251/**
1252 * Adds a new (or updates the value of an existing) guest/host MSR
1253 * pair to be swapped during the world-switch as part of the
1254 * auto-load/store MSR area in the VMCS.
1255 *
1256 * @returns VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param uMsr The MSR.
1259 * @param uGuestMsrValue Value of the guest MSR.
1260 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1261 * necessary.
1262 * @param pfAddedAndUpdated Where to store whether the MSR was added -and-
1263 * its value was updated. Optional, can be NULL.
1264 */
1265static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
1266 bool *pfAddedAndUpdated)
1267{
1268 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1269 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1270 uint32_t i;
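 /* Linear search for an existing entry for this MSR; if none is found, i equals cMsrs and a new slot is appended below. */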
1271 for (i = 0; i < cMsrs; i++)
1272 {
1273 if (pGuestMsr->u32Msr == uMsr)
1274 break;
1275 pGuestMsr++;
1276 }
1277
1278 bool fAdded = false;
1279 if (i == cMsrs)
1280 {
1281 ++cMsrs;
1282 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1283 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
1284
1285 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1286 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1287 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1288
1289 fAdded = true;
1290 }
1291
1292 /* Update the MSR values in the auto-load/store MSR area. */
1293 pGuestMsr->u32Msr = uMsr;
1294 pGuestMsr->u64Value = uGuestMsrValue;
1295
1296 /* Create/update the MSR slot in the host MSR area. */
1297 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1298 pHostMsr += i;
1299 pHostMsr->u32Msr = uMsr;
1300
1301 /*
1302 * Update the host MSR only when requested by the caller AND when we're
1303 * adding it to the auto-load/store area. Otherwise, it would have been
1304 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
1305 */
1306 bool fUpdatedMsrValue = false;
1307 if ( fAdded
1308 && fUpdateHostMsr)
1309 {
1310 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1311 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1312 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1313 fUpdatedMsrValue = true;
1314 }
1315
1316 if (pfAddedAndUpdated)
1317 *pfAddedAndUpdated = fUpdatedMsrValue;
1318 return VINF_SUCCESS;
1319}
1320
1321
1322/**
1323 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1324 * auto-load/store MSR area in the VMCS.
1325 *
1326 * @returns VBox status code.
1327 * @param pVCpu The cross context virtual CPU structure.
1328 * @param uMsr The MSR.
1329 */
1330static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1331{
1332 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1333 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1334 for (uint32_t i = 0; i < cMsrs; i++)
1335 {
1336 /* Find the MSR. */
1337 if (pGuestMsr->u32Msr == uMsr)
1338 {
1339 /* If it's the last MSR, simply reduce the count. */
1340 if (i == cMsrs - 1)
1341 {
1342 --cMsrs;
1343 break;
1344 }
1345
1346 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1347 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1348 pLastGuestMsr += cMsrs - 1;
1349 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1350 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1351
1352 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1353 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1354 pLastHostMsr += cMsrs - 1;
1355 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1356 pHostMsr->u64Value = pLastHostMsr->u64Value;
1357 --cMsrs;
1358 break;
1359 }
1360 pGuestMsr++;
1361 }
1362
1363 /* Update the VMCS if the count changed (meaning the MSR was found). */
1364 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1365 {
1366 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1367 AssertRCReturn(rc, rc);
1368
1369 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1370 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1371 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1372
1373 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1374 return VINF_SUCCESS;
1375 }
1376
1377 return VERR_NOT_FOUND;
1378}
1379
1380
1381/**
1382 * Checks if the specified guest MSR is part of the auto-load/store area in
1383 * the VMCS.
1384 *
1385 * @returns true if found, false otherwise.
1386 * @param pVCpu The cross context virtual CPU structure.
1387 * @param uMsr The MSR to find.
1388 */
1389static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1390{
1391 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1392 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1393
1394 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1395 {
1396 if (pGuestMsr->u32Msr == uMsr)
1397 return true;
1398 }
1399 return false;
1400}
1401
1402
1403/**
1404 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1405 *
1406 * @param pVCpu The cross context virtual CPU structure.
1407 *
1408 * @remarks No-long-jump zone!!!
1409 */
1410static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1411{
1412 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1413 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1414 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1415 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1416
1417 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1418 {
1419 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1420
1421 /*
1422 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1423 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1424 */
1425 if (pHostMsr->u32Msr == MSR_K6_EFER)
1426 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1427 else
1428 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1429 }
1430
1431 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1432}
1433
1434
1435#if HC_ARCH_BITS == 64
1436/**
1437 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1438 * perform lazy restoration of the host MSRs while leaving VT-x.
1439 *
1440 * @param pVCpu The cross context virtual CPU structure.
1441 *
1442 * @remarks No-long-jump zone!!!
1443 */
1444static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1445{
1446 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1447
1448 /*
1449 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1450 */
1451 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1452 {
1453 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1454 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1455 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1456 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1457 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1458 }
1459}
1460
1461
1462/**
1463 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1464 * lazily while leaving VT-x.
1465 *
1466 * @returns true if it does, false otherwise.
1467 * @param pVCpu The cross context virtual CPU structure.
1468 * @param uMsr The MSR to check.
1469 */
1470static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1471{
1472 NOREF(pVCpu);
1473 switch (uMsr)
1474 {
1475 case MSR_K8_LSTAR:
1476 case MSR_K6_STAR:
1477 case MSR_K8_SF_MASK:
1478 case MSR_K8_KERNEL_GS_BASE:
1479 return true;
1480 }
1481 return false;
1482}
1483
1484
1485/**
1486 * Saves a set of guest MSRs back into the guest-CPU context.
1487 *
1488 * @param pVCpu The cross context virtual CPU structure.
1489 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1490 * out-of-sync. Make sure to update the required fields
1491 * before using them.
1492 *
1493 * @remarks No-long-jump zone!!!
1494 */
1495static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1496{
1497 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1498 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1499
1500 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1501 {
1502 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1503 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
1504 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
1505 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
1506 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1507 }
1508}
1509
1510
1511/**
1512 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1513 *
1514 * The name of this function is slightly confusing. This function does NOT
1515 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1516 * common prefix for functions dealing with "lazy restoration" of the shared
1517 * MSRs.
1518 *
1519 * @param pVCpu The cross context virtual CPU structure.
1520 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1521 * out-of-sync. Make sure to update the required fields
1522 * before using them.
1523 *
1524 * @remarks No-long-jump zone!!!
1525 */
1526static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1527{
1528 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1529 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1530
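    /*
     * Helper for loading a lazy guest MSR: write the guest value only if it differs from the cached host
     * value; otherwise skip the (relatively expensive) WRMSR and merely assert in strict builds that the
     * hardware still holds the cached host value.
     */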
1531#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
1532 do { \
1533 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1534 ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
1535 else \
1536 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1537 } while (0)
1538
1539 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1540 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1541 {
1542 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
1543 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
1544 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
1545 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
1546 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1547 }
1548 else
1549 {
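    /* The guest MSRs were already loaded into hardware earlier; the guest-CPU context may have been
       updated since then, so write all of them unconditionally. */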
1550 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1551 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1552 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1553 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1554 }
1555
1556#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
1557}
1558
1559
1560/**
1561 * Performs lazy restoration of the set of host MSRs if they were previously
1562 * loaded with guest MSR values.
1563 *
1564 * @param pVCpu The cross context virtual CPU structure.
1565 *
1566 * @remarks No-long-jump zone!!!
1567 * @remarks The guest MSRs should have been saved back into the guest-CPU
1568 * context by hmR0VmxSaveGuestLazyMsrs()!!!
1569 */
1570static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1571{
1572 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1573 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1574
1575 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1576 {
1577 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1578 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1579 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1580 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1581 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1582 }
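    /* The host values are back in hardware (or were never replaced); invalidate both the loaded-guest
       and saved-host state. */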
1583 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1584}
1585#endif /* HC_ARCH_BITS == 64 */
1586
1587
1588/**
1589 * Verifies that our cached values of the VMCS controls are all
1590 * consistent with what's actually present in the VMCS.
1591 *
1592 * @returns VBox status code.
1593 * @param pVCpu The cross context virtual CPU structure.
1594 */
1595static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1596{
1597 uint32_t u32Val;
1598 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1599 AssertRCReturn(rc, rc);
1600 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1601 VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
1602
1603 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1604 AssertRCReturn(rc, rc);
1605 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1606 VERR_VMX_EXIT_CTLS_CACHE_INVALID);
1607
1608 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1609 AssertRCReturn(rc, rc);
1610 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1611 VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
1612
1613 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1614 AssertRCReturn(rc, rc);
1615 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1616 VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
1617
1618 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1619 {
1620 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1621 AssertRCReturn(rc, rc);
1622 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val,
1623 ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1624 VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
1625 }
1626
1627 return VINF_SUCCESS;
1628}
1629
1630
1631#ifdef VBOX_STRICT
1632/**
1633 * Verifies that our cached host EFER value has not changed
1634 * since we cached it.
1635 *
1636 * @param pVCpu The cross context virtual CPU structure.
1637 */
1638static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1639{
1640 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1641
1642 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1643 {
1644 uint64_t u64Val;
1645 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
1646 AssertRC(rc);
1647
1648 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1649 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1650 }
1651}
1652
1653
1654/**
1655 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1656 * VMCS are correct.
1657 *
1658 * @param pVCpu The cross context virtual CPU structure.
1659 */
1660static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1661{
1662 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1663
1664 /* Verify that the MSR counts in the VMCS are what we think they should be. */
1665 uint32_t cMsrs;
1666 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1667 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1668
1669 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1670 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1671
1672 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1673 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1674
1675 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1676 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1677 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1678 {
1679 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1680 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1681 pGuestMsr->u32Msr, cMsrs));
1682
1683 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1684 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1685 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1686
1687 /* Verify that the permissions are as expected in the MSR bitmap. */
1688 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1689 {
1690 VMXMSREXITREAD enmRead;
1691 VMXMSREXITWRITE enmWrite;
1692 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1693 AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1694 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1695 {
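    /* EFER is special-cased: it is swapped on VM-entry/VM-exit rather than passed through, so the
       bitmap must intercept both reads and writes. */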
1696 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1697 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1698 }
1699 else
1700 {
1701 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1702 pGuestMsr->u32Msr, cMsrs));
1703 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1704 pGuestMsr->u32Msr, cMsrs));
1705 }
1706 }
1707 }
1708}
1709#endif /* VBOX_STRICT */
1710
1711
1712/**
1713 * Flushes the TLB using EPT.
1714 *
1716 * @param pVCpu The cross context virtual CPU structure of the calling
1717 * EMT. Can be NULL depending on @a enmFlush.
1718 * @param enmFlush Type of flush.
1719 *
1720 * @remarks Caller is responsible for making sure this function is called only
1721 * when NestedPaging is supported and providing @a enmFlush that is
1722 * supported by the CPU.
1723 * @remarks Can be called with interrupts disabled.
1724 */
1725static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1726{
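    /* The INVEPT descriptor: the first quadword holds the EPT pointer (EPTP) for single-context flushes
       and is ignored for all-context flushes; the second quadword is reserved and must be zero. */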
1727 uint64_t au64Descriptor[2];
1728 if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
1729 au64Descriptor[0] = 0;
1730 else
1731 {
1732 Assert(pVCpu);
1733 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1734 }
1735 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1736
1737 int rc = VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
1738 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1739 rc));
1740 if ( RT_SUCCESS(rc)
1741 && pVCpu)
1742 {
1743 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1744 }
1745}
1746
1747
1748/**
1749 * Flushes the TLB using VPID.
1750 *
1752 * @param pVM The cross context VM structure.
1753 * @param pVCpu The cross context virtual CPU structure of the calling
1754 * EMT. Can be NULL depending on @a enmFlush.
1755 * @param enmFlush Type of flush.
1756 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1757 * on @a enmFlush).
1758 *
1759 * @remarks Can be called with interrupts disabled.
1760 */
1761static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1762{
1763 NOREF(pVM);
1764 AssertPtr(pVM);
1765 Assert(pVM->hm.s.vmx.fVpid);
1766
1767 uint64_t au64Descriptor[2];
1768 if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
1769 {
1770 au64Descriptor[0] = 0;
1771 au64Descriptor[1] = 0;
1772 }
1773 else
1774 {
1775 AssertPtr(pVCpu);
1776 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1777 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1778 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1779 au64Descriptor[1] = GCPtr;
1780 }
1781
1782 int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
1783 AssertMsg(rc == VINF_SUCCESS,
1784 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1785 if ( RT_SUCCESS(rc)
1786 && pVCpu)
1787 {
1788 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1789 }
1790}
1791
1792
1793/**
1794 * Invalidates a guest page by guest virtual address. Only relevant for
1795 * EPT/VPID, otherwise there is nothing really to invalidate.
1796 *
1797 * @returns VBox status code.
1798 * @param pVM The cross context VM structure.
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param GCVirt Guest virtual address of the page to invalidate.
1801 */
1802VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1803{
1804 AssertPtr(pVM);
1805 AssertPtr(pVCpu);
1806 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1807
1808 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1809 if (!fFlushPending)
1810 {
1811 /*
1812 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1813 * See @bugref{6043} and @bugref{6177}.
1814 *
1815 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1816 * function may be called in a loop with individual addresses.
1817 */
1818 if (pVM->hm.s.vmx.fVpid)
1819 {
1820 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1821 {
1822 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1823 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1824 }
1825 else
1826 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1827 }
1828 else if (pVM->hm.s.fNestedPaging)
1829 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1830 }
1831
1832 return VINF_SUCCESS;
1833}
1834
1835
1836/**
1837 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1838 * otherwise there is nothing really to invalidate.
1839 *
1840 * @returns VBox status code.
1841 * @param pVM The cross context VM structure.
1842 * @param pVCpu The cross context virtual CPU structure.
1843 * @param GCPhys Guest physical address of the page to invalidate.
1844 */
1845VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1846{
1847 NOREF(pVM); NOREF(GCPhys);
1848 LogFlowFunc(("%RGp\n", GCPhys));
1849
1850 /*
1851 * We cannot flush a page by guest-physical address: invvpid takes only a linear address, while invept only flushes
1852 * by EPT, not by individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*(),
1853 * as this function might be called in a loop. This results in a flush-by-EPT if EPT is in use. See @bugref{6568}.
1854 */
1855 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1856 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1857 return VINF_SUCCESS;
1858}
1859
1860
1861/**
1862 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1863 * case where neither EPT nor VPID is supported by the CPU.
1864 *
1865 * @param pVM The cross context VM structure.
1866 * @param pVCpu The cross context virtual CPU structure.
1867 * @param pCpu Pointer to the global HM struct.
1868 *
1869 * @remarks Called with interrupts disabled.
1870 */
1871static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1872{
1873 AssertPtr(pVCpu);
1874 AssertPtr(pCpu);
1875 NOREF(pVM);
1876
1877 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1878
1879 Assert(pCpu->idCpu != NIL_RTCPUID);
1880 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1881 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1882 pVCpu->hm.s.fForceTLBFlush = false;
1883 return;
1884}
1885
1886
1887/**
1888 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1889 *
1890 * @param pVM The cross context VM structure.
1891 * @param pVCpu The cross context virtual CPU structure.
1892 * @param pCpu Pointer to the global HM CPU struct.
1893 * @remarks All references to "ASID" in this function pertain to "VPID" in
1894 * Intel's nomenclature. The reason is to avoid confusion in comparisons,
1895 * since the host-CPU copies are named "ASID".
1896 *
1897 * @remarks Called with interrupts disabled.
1898 */
1899static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1900{
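    /* Statistics-only helpers: remember whether this world switch actually flushed the TLB so that
       skipped flushes can be counted. */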
1901#ifdef VBOX_WITH_STATISTICS
1902 bool fTlbFlushed = false;
1903# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1904# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1905 if (!fTlbFlushed) \
1906 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1907 } while (0)
1908#else
1909# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1910# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1911#endif
1912
1913 AssertPtr(pVM);
1914 AssertPtr(pCpu);
1915 AssertPtr(pVCpu);
1916 Assert(pCpu->idCpu != NIL_RTCPUID);
1917
1918 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1919 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1920 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1921
1922 /*
1923 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1924 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1925 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1926 */
1927 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1928 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1929 {
1930 ++pCpu->uCurrentAsid;
1931 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1932 {
1933 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1934 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1935 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1936 }
1937
1938 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1939 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1940 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1941
1942 /*
1943 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1944 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1945 */
1946 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1947 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1948 HMVMX_SET_TAGGED_TLB_FLUSHED();
1949 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1950 }
1951
1952 /* Check for explicit TLB flushes. */
1953 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1954 {
1955 /*
1956 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates
1957 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1958 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1959 * but not guest-physical mappings.
1960 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1961 */
1962 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1963 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1964 HMVMX_SET_TAGGED_TLB_FLUSHED();
1965 }
1966
1967 pVCpu->hm.s.fForceTLBFlush = false;
1968 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1969
1970 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1971 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1972 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1973 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1974 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1975 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
1976 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
1977 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1978 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1979
1980 /* Update VMCS with the VPID. */
1981 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1982 AssertRC(rc);
1983
1984#undef HMVMX_SET_TAGGED_TLB_FLUSHED
1985}
1986
1987
1988/**
1989 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1990 *
1992 * @param pVM The cross context VM structure.
1993 * @param pVCpu The cross context virtual CPU structure.
1994 * @param pCpu Pointer to the global HM CPU struct.
1995 *
1996 * @remarks Called with interrupts disabled.
1997 */
1998static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1999{
2000 AssertPtr(pVM);
2001 AssertPtr(pVCpu);
2002 AssertPtr(pCpu);
2003 Assert(pCpu->idCpu != NIL_RTCPUID);
2004 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
2005 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
2006
2007 /*
2008 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2009 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2010 */
2011 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2012 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2013 {
2014 pVCpu->hm.s.fForceTLBFlush = true;
2015 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2016 }
2017
2018 /* Check for explicit TLB flushes. */
2019 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2020 {
2021 pVCpu->hm.s.fForceTLBFlush = true;
2022 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2023 }
2024
2025 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2026 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2027
2028 if (pVCpu->hm.s.fForceTLBFlush)
2029 {
2030 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2031 pVCpu->hm.s.fForceTLBFlush = false;
2032 }
2033}
2034
2035
2036/**
2037 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2038 *
2040 * @param pVM The cross context VM structure.
2041 * @param pVCpu The cross context virtual CPU structure.
2042 * @param pCpu Pointer to the global HM CPU struct.
2043 *
2044 * @remarks Called with interrupts disabled.
2045 */
2046static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2047{
2048 AssertPtr(pVM);
2049 AssertPtr(pVCpu);
2050 AssertPtr(pCpu);
2051 Assert(pCpu->idCpu != NIL_RTCPUID);
2052 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with VPID disabled."));
2053 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging enabled."));
2054
2055 /*
2056 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2057 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2058 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2059 */
2060 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2061 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2062 {
2063 pVCpu->hm.s.fForceTLBFlush = true;
2064 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2065 }
2066
2067 /* Check for explicit TLB flushes. */
2068 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2069 {
2070 /*
2071 * If we ever support VPID flush combinations other than ALL-context or SINGLE-context (see hmR0VmxSetupTaggedTlb()),
2072 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
2073 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush too) - an obscure corner case.
2074 */
2075 pVCpu->hm.s.fForceTLBFlush = true;
2076 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2077 }
2078
2079 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2080 if (pVCpu->hm.s.fForceTLBFlush)
2081 {
2082 ++pCpu->uCurrentAsid;
2083 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2084 {
2085 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2086 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2087 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2088 }
2089
2090 pVCpu->hm.s.fForceTLBFlush = false;
2091 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2092 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2093 if (pCpu->fFlushAsidBeforeUse)
2094 {
2095 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
2096 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2097 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
2098 {
2099 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2100 pCpu->fFlushAsidBeforeUse = false;
2101 }
2102 else
2103 {
2104 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2105 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2106 }
2107 }
2108 }
2109
2110 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2111 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2112 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2113 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2114 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2115 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2116 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2117
2118 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2119 AssertRC(rc);
2120}
2121
2122
2123/**
2124 * Flushes the guest TLB entry based on CPU capabilities.
2125 *
2126 * @param pVCpu The cross context virtual CPU structure.
2127 * @param pCpu Pointer to the global HM CPU struct.
2128 */
2129DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2130{
2131#ifdef HMVMX_ALWAYS_FLUSH_TLB
2132 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2133#endif
2134 PVM pVM = pVCpu->CTX_SUFF(pVM);
2135 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
2136 {
2137 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2138 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2139 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2140 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2141 default:
2142 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2143 break;
2144 }
2145
2146 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2147}
2148
2149
2150/**
2151 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2152 * TLB entries from the host TLB before VM-entry.
2153 *
2154 * @returns VBox status code.
2155 * @param pVM The cross context VM structure.
2156 */
2157static int hmR0VmxSetupTaggedTlb(PVM pVM)
2158{
2159 /*
2160 * Determine optimal flush type for Nested Paging.
2161 * We cannot ignore EPT if no suitable flush type is supported by the CPU as we've already set up unrestricted
2162 * guest execution (see hmR3InitFinalizeR0()).
2163 */
2164 if (pVM->hm.s.fNestedPaging)
2165 {
2166 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2167 {
2168 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2169 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
2170 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2171 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
2172 else
2173 {
2174 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
2175 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2176 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
2177 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2178 }
2179
2180 /* Make sure the write-back cacheable memory type for EPT is supported. */
2181 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)))
2182 {
2183 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2184 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
2185 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2186 }
2187
2188 /* EPT requires a page-walk length of 4. */
2189 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
2190 {
2191 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2192 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
2193 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2194 }
2195 }
2196 else
2197 {
2198 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2199 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2200 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
2201 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2202 }
2203 }
2204
2205 /*
2206 * Determine optimal flush type for VPID.
2207 */
2208 if (pVM->hm.s.vmx.fVpid)
2209 {
2210 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2211 {
2212 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2213 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
2214 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2215 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
2216 else
2217 {
2218 /* Neither SINGLE- nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2219 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2220 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
2221 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2222 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2223 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2224 pVM->hm.s.vmx.fVpid = false;
2225 }
2226 }
2227 else
2228 {
2229 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2230 Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
2231 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2232 pVM->hm.s.vmx.fVpid = false;
2233 }
2234 }
2235
2236 /*
2237 * Setup the handler for flushing tagged-TLBs.
2238 */
2239 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2240 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
2241 else if (pVM->hm.s.fNestedPaging)
2242 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
2243 else if (pVM->hm.s.vmx.fVpid)
2244 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
2245 else
2246 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
2247 return VINF_SUCCESS;
2248}
2249
2250
2251/**
2252 * Sets up pin-based VM-execution controls in the VMCS.
2253 *
2254 * @returns VBox status code.
2255 * @param pVM The cross context VM structure.
2256 * @param pVCpu The cross context virtual CPU structure.
2257 */
2258static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2259{
2260 AssertPtr(pVM);
2261 AssertPtr(pVCpu);
2262
2263 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2264 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2265
2266 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2267 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2268
2269 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2270 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2271
2272 /* Enable the VMX preemption timer. */
2273 if (pVM->hm.s.vmx.fUsePreemptTimer)
2274 {
2275 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2276 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2277 }
2278
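    /* Every bit we want to set must also be settable on this CPU (i.e. present in allowed1); a mismatch
       means an unsupported feature combination. */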
2279 if ((val & zap) != val)
2280 {
2281 LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2282 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
2283 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2284 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2285 }
2286
2287 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
2288 AssertRCReturn(rc, rc);
2289
2290 pVCpu->hm.s.vmx.u32PinCtls = val;
2291 return rc;
2292}
2293
2294
2295/**
2296 * Sets up processor-based VM-execution controls in the VMCS.
2297 *
2298 * @returns VBox status code.
2299 * @param pVM The cross context VM structure.
2300 * @param pVCpu The cross context virtual CPU structure.
2301 */
2302static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2303{
2304 AssertPtr(pVM);
2305 AssertPtr(pVCpu);
2306
2307 int rc = VERR_INTERNAL_ERROR_5;
2308 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2309 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2310
2311 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2312 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2313 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2314 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2315 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2316 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2317 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2318
2319 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; check that the CPU does not force it to be always set or always clear. */
2320 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2321 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2322 {
2323 LogRel(("hmR0VmxSetupProcCtls: Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2324 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2325 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2326 }
2327
2328 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2329 if (!pVM->hm.s.fNestedPaging)
2330 {
2331 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2332 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2333 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2334 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2335 }
2336
2337 /* Use TPR shadowing if supported by the CPU. */
2338 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2339 {
2340 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2341 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2342 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2343 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2344 AssertRCReturn(rc, rc);
2345
2346 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2347 /* CR8 writes cause a VM-exit based on TPR threshold. */
2348 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2349 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2350 }
2351 else
2352 {
2353 /*
2354 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2355 * Set this control only for 64-bit guests.
2356 */
2357 if (pVM->hm.s.fAllow64BitGuests)
2358 {
2359 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2360 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2361 }
2362 }
2363
2364 /* Use MSR-bitmaps if supported by the CPU. */
2365 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2366 {
2367 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2368
2369 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2370 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2371 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2372 AssertRCReturn(rc, rc);
2373
2374 /*
2375 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2376 * automatically using dedicated fields in the VMCS.
2377 */
2378 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2379 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2380 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2381 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2382 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2383
2384#if HC_ARCH_BITS == 64
2385 /*
2386 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2387 */
2388 if (pVM->hm.s.fAllow64BitGuests)
2389 {
2390 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2391 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2392 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2393 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2394 }
2395#endif
2396 }
2397
2398 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2399 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2400 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2401
2402 if ((val & zap) != val)
2403 {
2404 LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2405 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
2406 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2407 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2408 }
2409
2410 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
2411 AssertRCReturn(rc, rc);
2412
2413 pVCpu->hm.s.vmx.u32ProcCtls = val;
2414
2415 /*
2416 * Secondary processor-based VM-execution controls.
2417 */
2418 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2419 {
2420 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2421 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2422
2423 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2424 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
2425
2426 if (pVM->hm.s.fNestedPaging)
2427 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
2428 else
2429 {
2430 /*
2431 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
2432 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
2433 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
2434 */
2435 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2436 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2437 }
2438
2439 if (pVM->hm.s.vmx.fVpid)
2440 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
2441
2442 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2443 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
2444
2445 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
2446 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2447 * done dynamically. */
2448 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2449 {
2450 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2451 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2452 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2453 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2454 AssertRCReturn(rc, rc);
2455 }
2456
2457 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2458 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
2459
2460 if ( pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
2461 && pVM->hm.s.vmx.cPleGapTicks
2462 && pVM->hm.s.vmx.cPleWindowTicks)
2463 {
2464 val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT; /* Enable pause-loop exiting. */
2465
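    /* PLE_Gap is the maximum TSC-tick distance between two PAUSEs for them to be treated as part of the
       same spin loop; PLE_Window is the time (since the first PAUSE of the loop) after which a further
       PAUSE causes a VM-exit. See Intel spec. "PAUSE-Loop Exiting". */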
2466 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
2467 AssertRCReturn(rc, rc);
2468
2469 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
2470 AssertRCReturn(rc, rc);
2471 }
2472
2473 if ((val & zap) != val)
2474 {
2475 LogRel(("hmR0VmxSetupProcCtls: Invalid secondary processor-based VM-execution controls combo! "
2476 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
2477 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2478 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2479 }
2480
2481 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
2482 AssertRCReturn(rc, rc);
2483
2484 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2485 }
2486 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2487 {
2488 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest enabled when secondary processor-based VM-execution controls are not "
2489 "available\n"));
2490 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2491 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2492 }
2493
2494 return VINF_SUCCESS;
2495}
2496
2497
2498/**
2499 * Sets up miscellaneous (everything other than Pin & Processor-based
2500 * VM-execution) control fields in the VMCS.
2501 *
2502 * @returns VBox status code.
2503 * @param pVM The cross context VM structure.
2504 * @param pVCpu The cross context virtual CPU structure.
2505 */
2506static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2507{
2508 NOREF(pVM);
2509 AssertPtr(pVM);
2510 AssertPtr(pVCpu);
2511
2512 int rc = VERR_GENERAL_FAILURE;
2513
2514 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2515#if 0
2516 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
2517 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
2518 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
2519
2520 /*
2521 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
2522 * and the X86_XCPT_PF bit in the exception bitmap is set, a VM-exit occurs; if the bit is clear, it does not.
2523 * We thus use the exception bitmap to control this rather than using both.
2524 */
2525 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
2526 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
2527
2528 /** @todo Explore possibility of using IO-bitmaps. */
2529 /* All IO & IOIO instructions cause VM-exits. */
2530 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
2531 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
2532
2533 /* Initialize the MSR-bitmap area. */
2534 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2535 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
2536 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2537#endif
2538
2539 /* Setup MSR auto-load/store area. */
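    /* The same physical area is used both as the VM-entry MSR-load area and the VM-exit MSR-store area for
       the guest, so the guest values stored on VM-exit are what gets loaded again on the next VM-entry. */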
2540 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2541 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2542 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2543 AssertRCReturn(rc, rc);
2544 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2545 AssertRCReturn(rc, rc);
2546
2547 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2548 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2549 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2550 AssertRCReturn(rc, rc);
2551
2552 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2553 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2554 AssertRCReturn(rc, rc);
2555
2556 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2557#if 0
2558 /* Setup debug controls */
2559 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
2560 AssertRCReturn(rc, rc);
2561 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2562 AssertRCReturn(rc, rc);
2563#endif
2564
2565 return rc;
2566}
2567
2568
2569/**
2570 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2571 *
2572 * @returns VBox status code.
2573 * @param pVM The cross context VM structure.
2574 * @param pVCpu The cross context virtual CPU structure.
2575 */
2576static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2577{
2578 AssertPtr(pVM);
2579 AssertPtr(pVCpu);
2580
2581 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2582
2583 uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
2584
2585 /* Must always intercept #AC to prevent the guest from hanging the CPU. */
2586 u32XcptBitmap |= RT_BIT_32(X86_XCPT_AC);
2587
2588 /* Because we need to maintain the DR6 state even when intercepting DRx reads
2589 and writes, and because recursive #DBs can cause the CPU to hang, we must always
2590 intercept #DB. */
2591 u32XcptBitmap |= RT_BIT_32(X86_XCPT_DB);
2592
2593 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2594 if (!pVM->hm.s.fNestedPaging)
2595 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2596
2597 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2598 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2599 AssertRCReturn(rc, rc);
2600 return rc;
2601}
2602
2603
2604/**
2605 * Sets up the initial guest-state mask. The guest-state mask is consulted
2606 * before reading guest-state fields from the VMCS because VMREADs can be
2607 * expensive in the nested virtualization case (where they would cause a VM-exit).
2608 *
2609 * @param pVCpu The cross context virtual CPU structure.
2610 */
2611static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2612{
2613 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2614 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2615 return VINF_SUCCESS;
2616}
2617
2618
2619/**
2620 * Does per-VM VT-x initialization.
2621 *
2622 * @returns VBox status code.
2623 * @param pVM The cross context VM structure.
2624 */
2625VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2626{
2627 LogFlowFunc(("pVM=%p\n", pVM));
2628
2629 int rc = hmR0VmxStructsAlloc(pVM);
2630 if (RT_FAILURE(rc))
2631 {
2632 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2633 return rc;
2634 }
2635
2636 return VINF_SUCCESS;
2637}
2638
2639
2640/**
2641 * Does per-VM VT-x termination.
2642 *
2643 * @returns VBox status code.
2644 * @param pVM The cross context VM structure.
2645 */
2646VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2647{
2648 LogFlowFunc(("pVM=%p\n", pVM));
2649
2650#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2651 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2652 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2653#endif
2654 hmR0VmxStructsFree(pVM);
2655 return VINF_SUCCESS;
2656}
2657
2658
2659/**
2660 * Sets up the VM for execution under VT-x.
2661 * This function is only called once per-VM during initialization.
2662 *
2663 * @returns VBox status code.
2664 * @param pVM The cross context VM structure.
2665 */
2666VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2667{
2668 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2669 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2670
2671 LogFlowFunc(("pVM=%p\n", pVM));
2672
2673 /*
2674 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2675 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel().
2676 */
2677 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2678 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2679 || !pVM->hm.s.vmx.pRealModeTSS))
2680 {
2681 LogRel(("VMXR0SetupVM: Invalid real-on-v86 state.\n"));
2682 return VERR_INTERNAL_ERROR;
2683 }
2684
2685 /* Initialize these always, see hmR3InitFinalizeR0().*/
2686 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NONE;
2687 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
2688
2689 /* Setup the tagged-TLB flush handlers. */
2690 int rc = hmR0VmxSetupTaggedTlb(pVM);
2691 if (RT_FAILURE(rc))
2692 {
2693 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2694 return rc;
2695 }
2696
2697 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2698 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2699#if HC_ARCH_BITS == 64
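    /* All three controls are required: load the guest EFER on VM-entry, load the host EFER on VM-exit,
       and save the guest EFER on VM-exit. */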
2700 if ( (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2701 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2702 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2703 {
2704 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2705 }
2706#endif
2707
2708 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2709 {
2710 PVMCPU pVCpu = &pVM->aCpus[i];
2711 AssertPtr(pVCpu);
2712 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2713
2714 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2715 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2716
2717 /* Initialize the VM-exit history array with end-of-array markers (UINT16_MAX). */
2718 Assert(!pVCpu->hm.s.idxExitHistoryFree);
2719 HMCPU_EXIT_HISTORY_RESET(pVCpu);
2720
2721 /* Set revision dword at the beginning of the VMCS structure. */
2722 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2723
2724 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2725 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2726 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2727 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2728
2729 /* Load this VMCS as the current VMCS. */
2730 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2731 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2732 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2733
2734 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2735 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2736 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2737
2738 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2739 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2740 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2741
2742 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2743 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2744 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2745
2746 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2747 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2748 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2749
2750 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2751 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2752 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2753
2754#if HC_ARCH_BITS == 32
2755 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2756 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2757 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2758#endif
2759
2760 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2761 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2762 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2763 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2764
2765 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2766
2767 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2768 }
2769
2770 return VINF_SUCCESS;
2771}
2772
2773
2774/**
2775 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2776 * the VMCS.
2777 *
2778 * @returns VBox status code.
2779 * @param pVM The cross context VM structure.
2780 * @param pVCpu The cross context virtual CPU structure.
2781 */
2782DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2783{
2784 NOREF(pVM); NOREF(pVCpu);
2785
2786 RTCCUINTREG uReg = ASMGetCR0();
2787 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2788 AssertRCReturn(rc, rc);
2789
2790 uReg = ASMGetCR3();
2791 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2792 AssertRCReturn(rc, rc);
2793
2794 uReg = ASMGetCR4();
2795 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2796 AssertRCReturn(rc, rc);
2797 return rc;
2798}
2799
2800
2801#if HC_ARCH_BITS == 64
2802/**
2803 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2804 * requirements. See hmR0VmxSaveHostSegmentRegs().
2805 */
2806# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2807 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2808 { \
2809 bool fValidSelector = true; \
2810 if ((selValue) & X86_SEL_LDT) \
2811 { \
2812 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2813 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2814 } \
2815 if (fValidSelector) \
2816 { \
2817 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2818 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2819 } \
2820 (selValue) = 0; \
2821 }
2822#endif
2823
2824
2825/**
2826 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2827 * the host-state area in the VMCS.
2828 *
2829 * @returns VBox status code.
2830 * @param pVM The cross context VM structure.
2831 * @param pVCpu The cross context virtual CPU structure.
2832 */
2833DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2834{
2835 int rc = VERR_INTERNAL_ERROR_5;
2836
2837#if HC_ARCH_BITS == 64
2838 /*
2839 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2840 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
2841 */
2842 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2843 ("Re-saving host-state after executing guest code without leaving VT-x!\n"), VERR_WRONG_ORDER);
2844#endif
2845
2846 /*
2847 * Host DS, ES, FS and GS segment registers.
2848 */
2849#if HC_ARCH_BITS == 64
2850 RTSEL uSelDS = ASMGetDS();
2851 RTSEL uSelES = ASMGetES();
2852 RTSEL uSelFS = ASMGetFS();
2853 RTSEL uSelGS = ASMGetGS();
2854#else
2855 RTSEL uSelDS = 0;
2856 RTSEL uSelES = 0;
2857 RTSEL uSelFS = 0;
2858 RTSEL uSelGS = 0;
2859#endif
2860
2861 /* Recalculate which host-state bits need to be manually restored. */
2862 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2863
2864 /*
2865 * Host CS and SS segment registers.
2866 */
2867 RTSEL uSelCS = ASMGetCS();
2868 RTSEL uSelSS = ASMGetSS();
2869
2870 /*
2871 * Host TR segment register.
2872 */
2873 RTSEL uSelTR = ASMGetTR();
2874
2875#if HC_ARCH_BITS == 64
2876 /*
2877 * Determine if the host segment registers are suitable for VT-x. If they are not, load null selectors so VM-entry
2878 * succeeds, and restore the originals before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2879 */
2880 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2881 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2882 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2883 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2884# undef VMXLOCAL_ADJUST_HOST_SEG
2885#endif
2886
2887 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2888 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2889 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2890 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2891 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2892 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2893 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2894 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2895 Assert(uSelCS);
2896 Assert(uSelTR);
2897
2898 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2899#if 0
2900 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2901 Assert(uSelSS != 0);
2902#endif
2903
2904 /* Write these host selector fields into the host-state area in the VMCS. */
2905 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2906 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2907#if HC_ARCH_BITS == 64
2908 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2909 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2910 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2911 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2912#else
2913 NOREF(uSelDS);
2914 NOREF(uSelES);
2915 NOREF(uSelFS);
2916 NOREF(uSelGS);
2917#endif
2918 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2919
2920 /*
2921 * Host GDTR and IDTR.
2922 */
2923 RTGDTR Gdtr;
2924 RTIDTR Idtr;
2925 RT_ZERO(Gdtr);
2926 RT_ZERO(Idtr);
2927 ASMGetGDTR(&Gdtr);
2928 ASMGetIDTR(&Idtr);
2929 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
2930 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
2931
2932#if HC_ARCH_BITS == 64
2933 /*
2934     * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
2935 * maximum limit (0xffff) on every VM-exit.
2936 */
2937 if (Gdtr.cbGdt != 0xffff)
2938 {
2939 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
2940 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
2941 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2942 }
2943
2944 /*
2945 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
2946     * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore, if the host limit is 0xfff, VT-x
2947     * bloating it to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insist
2948     * on 0xfff being the limit (Windows Patch Guard) or use the limit for other purposes (darwin puts the CPU ID in there
2949 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on
2950 * hosts where we are pretty sure it won't cause trouble.
2951 */
2952# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
2953 if (Idtr.cbIdt < 0x0fff)
2954# else
2955 if (Idtr.cbIdt != 0xffff)
2956# endif
2957 {
2958 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
2959 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
2960 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
2961 }
2962#endif
2963
2964 /*
2965 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
2966 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
2967 */
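    /* For instance (illustrative values): a TR selector of 0x0040 has TI=0 and RPL=0, so masking the low
       three bits leaves byte offset 0x40 into the GDT, i.e. GDT descriptor index 8. */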
2968 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
2969 ("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt),
2970 VERR_VMX_INVALID_HOST_STATE);
2971
2972 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2973#if HC_ARCH_BITS == 64
2974 uintptr_t uTRBase = X86DESC64_BASE(pDesc);
2975
2976 /*
2977 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
2978 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
2979 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
2980 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
2981 *
2982 * [1] See Intel spec. 3.5 "System Descriptor Types".
2983 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
2984 */
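    /* (A limit of 0x67 corresponds to the minimal 104-byte TSS; type 11 is the busy-TSS descriptor type.) */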
2985 Assert(pDesc->System.u4Type == 11);
2986 if ( pDesc->System.u16LimitLow != 0x67
2987 || pDesc->System.u4LimitHigh)
2988 {
2989 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
2990 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
2991 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
2992 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
2993 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
2994
2995 /* Store the GDTR here as we need it while restoring TR. */
2996 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2997 }
2998#else
2999 NOREF(pVM);
3000 uintptr_t uTRBase = X86DESC_BASE(pDesc);
3001#endif
3002 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3003 AssertRCReturn(rc, rc);
3004
3005 /*
3006 * Host FS base and GS base.
3007 */
3008#if HC_ARCH_BITS == 64
3009 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3010 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3011 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
3012 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
3013
3014 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3015 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3016 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3017 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3018 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3019#endif
3020 return rc;
3021}
3022
3023
3024/**
3025 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
3026 * host-state area of the VMCS. These MSRs will be automatically restored on
3027 * the host after every successful VM-exit.
3028 *
3029 * @returns VBox status code.
3030 * @param pVM The cross context VM structure.
3031 * @param pVCpu The cross context virtual CPU structure.
3032 *
3033 * @remarks No-long-jump zone!!!
3034 */
3035DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3036{
3037 NOREF(pVM);
3038
3039 AssertPtr(pVCpu);
3040 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3041
3042 int rc = VINF_SUCCESS;
3043#if HC_ARCH_BITS == 64
3044 if (pVM->hm.s.fAllow64BitGuests)
3045 hmR0VmxLazySaveHostMsrs(pVCpu);
3046#endif
3047
3048 /*
3049 * Host Sysenter MSRs.
3050 */
3051 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3052 AssertRCReturn(rc, rc);
3053#if HC_ARCH_BITS == 32
3054 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3055 AssertRCReturn(rc, rc);
3056 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3057#else
3058 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3059 AssertRCReturn(rc, rc);
3060 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3061#endif
3062 AssertRCReturn(rc, rc);
3063
3064 /*
3065 * Host EFER MSR.
3066 * If the CPU supports the newer VMCS controls for managing EFER, use them.
3067 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
3068 */
3069 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3070 {
3071 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3072 AssertRCReturn(rc, rc);
3073 }
3074
3075 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
3076 * hmR0VmxLoadGuestExitCtls() !! */
3077
3078 return rc;
3079}
3080
3081
3082/**
3083 * Figures out if we need to swap the EFER MSR which is
3084 * particularly expensive.
3085 *
3086 * We check all relevant bits. For now, that's everything
3087 * besides LMA/LME, as these two bits are handled by VM-entry,
3088 * see hmR0VmxLoadGuestExitCtls() and
3089 * hmR0VmxLoadGuestEntryCtls().
3090 *
3091 * @returns true if we need to load guest EFER, false otherwise.
3092 * @param pVCpu The cross context virtual CPU structure.
3093 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3094 * out-of-sync. Make sure to update the required fields
3095 * before using them.
3096 *
3097 * @remarks Requires EFER, CR4.
3098 * @remarks No-long-jump zone!!!
3099 */
3100static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3101{
3102#ifdef HMVMX_ALWAYS_SWAP_EFER
3103 return true;
3104#endif
3105
3106#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
3107 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3108 if (CPUMIsGuestInLongMode(pVCpu))
3109 return false;
3110#endif
3111
3112 PVM pVM = pVCpu->CTX_SUFF(pVM);
3113 uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3114 uint64_t u64GuestEfer = pMixedCtx->msrEFER;
3115
3116 /*
3117 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
3118 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
3119 */
3120 if ( CPUMIsGuestInLongMode(pVCpu)
3121 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3122 {
3123 return true;
3124 }
3125
3126 /*
3127 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
3128 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3129 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3130 */
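    /* Illustrative scenario: a PAE guest running with EFER.NXE=1 on a host whose EFER.NXE=0 would, if executed
       with the host EFER, see its XD page-table bits turn into reserved-bit page faults; hence the guest EFER
       must be loaded in that case. */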
3131 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3132 && (pMixedCtx->cr0 & X86_CR0_PG)
3133 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3134 {
3135 /* Assert that host is PAE capable. */
3136 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3137 return true;
3138 }
3139
3140 /** @todo Check the latest Intel spec. for any other bits,
3141 * like SMEP/SMAP? */
3142 return false;
3143}
3144
3145
3146/**
3147 * Sets up VM-entry controls in the VMCS. These controls can affect things done
3148 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
3149 * controls".
3150 *
3151 * @returns VBox status code.
3152 * @param pVCpu The cross context virtual CPU structure.
3153 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3154 * out-of-sync. Make sure to update the required fields
3155 * before using them.
3156 *
3157 * @remarks Requires EFER.
3158 * @remarks No-long-jump zone!!!
3159 */
3160DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3161{
3162 int rc = VINF_SUCCESS;
3163 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3164 {
3165 PVM pVM = pVCpu->CTX_SUFF(pVM);
3166 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3167 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
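        /* How the capability MSR pair works: bits set in 'disallowed0' must be 1 in the control, and bits
           clear in 'allowed1' must be 0. Requesting a feature whose bit is clear in 'zap' therefore makes
           the (val & zap) != val check below reject the combination. */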
3168
3169        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3170 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3171
3172 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3173 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3174 {
3175 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3176 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
3177 }
3178 else
3179 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3180
3181        /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use them. */
3182 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3183 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3184 {
3185 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3186 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
3187 }
3188
3189 /*
3190 * The following should -not- be set (since we're not in SMM mode):
3191 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3192 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3193 */
3194
3195 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3196 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3197
3198 if ((val & zap) != val)
3199 {
3200 LogRel(("hmR0VmxLoadGuestEntryCtls: Invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3201 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
3202 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3203 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3204 }
3205
3206 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
3207 AssertRCReturn(rc, rc);
3208
3209 pVCpu->hm.s.vmx.u32EntryCtls = val;
3210 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3211 }
3212 return rc;
3213}
3214
3215
3216/**
3217 * Sets up the VM-exit controls in the VMCS.
3218 *
3219 * @returns VBox status code.
3220 * @param pVCpu The cross context virtual CPU structure.
3221 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3222 * out-of-sync. Make sure to update the required fields
3223 * before using them.
3224 *
3225 * @remarks Requires EFER.
3226 */
3227DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3228{
3229 NOREF(pMixedCtx);
3230
3231 int rc = VINF_SUCCESS;
3232 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3233 {
3234 PVM pVM = pVCpu->CTX_SUFF(pVM);
3235 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3236 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3237
3238 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3239 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3240
3241 /*
3242 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3243 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
3244 */
3245#if HC_ARCH_BITS == 64
3246 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3247 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3248#else
3249 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3250 {
3251 /* The switcher returns to long mode, EFER is managed by the switcher. */
3252 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3253 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3254 }
3255 else
3256 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3257#endif
3258
3259        /* If the newer VMCS fields for managing EFER exist, use them. */
3260 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3261 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3262 {
3263 val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3264 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3265 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
3266 }
3267
3268 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3269 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3270
3271 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3272 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3273 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3274
3275 if ( pVM->hm.s.vmx.fUsePreemptTimer
3276 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
3277 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3278
3279 if ((val & zap) != val)
3280 {
3281            LogRel(("hmR0VmxLoadGuestExitCtls: Invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3282 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
3283 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3284 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3285 }
3286
3287 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
3288 AssertRCReturn(rc, rc);
3289
3290 pVCpu->hm.s.vmx.u32ExitCtls = val;
3291 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3292 }
3293 return rc;
3294}
3295
3296
3297/**
3298 * Loads the guest APIC and related state.
3299 *
3300 * @returns VBox status code.
3301 * @param pVCpu The cross context virtual CPU structure.
3302 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3303 * out-of-sync. Make sure to update the required fields
3304 * before using them.
3305 */
3306DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3307{
3308 NOREF(pMixedCtx);
3309
3310 int rc = VINF_SUCCESS;
3311 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3312 {
3313 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
3314 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3315 {
3316 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3317
3318 bool fPendingIntr = false;
3319 uint8_t u8Tpr = 0;
3320 uint8_t u8PendingIntr = 0;
3321 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3322 AssertRCReturn(rc, rc);
3323
3324 /*
3325 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
3326 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
3327 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
3328 * the interrupt when we VM-exit for other reasons.
3329 */
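            /* Worked example (illustrative values): TPR = 0x60 and a pending vector of 0x51 give priority classes
               6 and 5; since the pending class is <= the TPR class, the threshold becomes 5 and VT-x exits as soon
               as the guest lowers its TPR class below 5, i.e. exactly when vector 0x51 becomes deliverable. */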
3330 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3331 uint32_t u32TprThreshold = 0;
3332 if (fPendingIntr)
3333 {
3334 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3335 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
3336 const uint8_t u8TprPriority = (u8Tpr >> 4) & 0xf;
3337 if (u8PendingPriority <= u8TprPriority)
3338 u32TprThreshold = u8PendingPriority;
3339 else
3340 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
3341 }
3342 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3343
3344 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3345 AssertRCReturn(rc, rc);
3346 }
3347
3348 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3349 }
3350 return rc;
3351}
3352
3353
3354/**
3355 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3356 *
3357 * @returns Guest's interruptibility-state.
3358 * @param pVCpu The cross context virtual CPU structure.
3359 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3360 * out-of-sync. Make sure to update the required fields
3361 * before using them.
3362 *
3363 * @remarks No-long-jump zone!!!
3364 */
3365DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3366{
3367 /*
3368 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3369 */
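    /* E.g. (illustrative): after an STI or MOV SS the force-flag carries the RIP the inhibition applies to
       (see EMGetInhibitInterruptsPC below); the shadow is only reported to VT-x while the guest is still at
       that RIP, and BLOCK_STI vs. BLOCK_MOVSS is chosen from the current state of EFLAGS.IF. */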
3370 uint32_t uIntrState = 0;
3371 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3372 {
3373 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
3374 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3375 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3376 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3377 {
3378 if (pMixedCtx->eflags.Bits.u1IF)
3379 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3380 else
3381 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3382 }
3383 /* else: Although we can clear the force-flag here, let's keep this side-effects free. */
3384 }
3385
3386 /*
3387 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3388 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3389 * setting this would block host-NMIs and IRET will not clear the blocking.
3390 *
3391 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3392 */
3393 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3394 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3395 {
3396 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3397 }
3398
3399 return uIntrState;
3400}
3401
3402
3403/**
3404 * Loads the guest's interruptibility-state into the guest-state area in the
3405 * VMCS.
3406 *
3407 * @returns VBox status code.
3408 * @param pVCpu The cross context virtual CPU structure.
3409 * @param uIntrState The interruptibility-state to set.
3410 */
3411static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3412{
3413 NOREF(pVCpu);
3414 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
3415 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3416 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
3417 AssertRCReturn(rc, rc);
3418 return rc;
3419}
3420
3421
3422/**
3423 * Loads the exception intercepts required for guest execution in the VMCS.
3424 *
3425 * @returns VBox status code.
3426 * @param pVCpu The cross context virtual CPU structure.
3427 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3428 * out-of-sync. Make sure to update the required fields
3429 * before using them.
3430 */
3431static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3432{
3433 NOREF(pMixedCtx);
3434 int rc = VINF_SUCCESS;
3435 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
3436 {
3437 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
3438 if (pVCpu->hm.s.fGIMTrapXcptUD)
3439 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
3440 else
3441 {
3442#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3443 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3444#endif
3445 }
3446
3447 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
3448 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
3449
3450 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3451 AssertRCReturn(rc, rc);
3452
3453 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3454 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
3455 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
3456 }
3457 return rc;
3458}
3459
3460
3461/**
3462 * Loads the guest's RIP into the guest-state area in the VMCS.
3463 *
3464 * @returns VBox status code.
3465 * @param pVCpu The cross context virtual CPU structure.
3466 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3467 * out-of-sync. Make sure to update the required fields
3468 * before using them.
3469 *
3470 * @remarks No-long-jump zone!!!
3471 */
3472static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3473{
3474 int rc = VINF_SUCCESS;
3475 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3476 {
3477 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3478 AssertRCReturn(rc, rc);
3479
3480 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3481 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
3482 HMCPU_CF_VALUE(pVCpu)));
3483 }
3484 return rc;
3485}
3486
3487
3488/**
3489 * Loads the guest's RSP into the guest-state area in the VMCS.
3490 *
3491 * @returns VBox status code.
3492 * @param pVCpu The cross context virtual CPU structure.
3493 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3494 * out-of-sync. Make sure to update the required fields
3495 * before using them.
3496 *
3497 * @remarks No-long-jump zone!!!
3498 */
3499static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3500{
3501 int rc = VINF_SUCCESS;
3502 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3503 {
3504 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3505 AssertRCReturn(rc, rc);
3506
3507 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3508 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
3509 }
3510 return rc;
3511}
3512
3513
3514/**
3515 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
3516 *
3517 * @returns VBox status code.
3518 * @param pVCpu The cross context virtual CPU structure.
3519 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3520 * out-of-sync. Make sure to update the required fields
3521 * before using them.
3522 *
3523 * @remarks No-long-jump zone!!!
3524 */
3525static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3526{
3527 int rc = VINF_SUCCESS;
3528 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3529 {
3530 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3531 Let us assert it as such and use 32-bit VMWRITE. */
3532 Assert(!(pMixedCtx->rflags.u64 >> 32));
3533 X86EFLAGS Eflags = pMixedCtx->eflags;
3534 /** @todo r=bird: There shall be no need to OR in X86_EFL_1 here, nor
3535 * shall there be any reason for clearing bits 63:22, 15, 5 and 3.
3536 * These will never be cleared/set, unless some other part of the VMM
3537         * code is buggy - in which case we're better off finding and fixing
3538 * those bugs than hiding them. */
3539 Assert(Eflags.u32 & X86_EFL_RA1_MASK);
3540 Assert(!(Eflags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3541 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
3542 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
3543
3544 /*
3545 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
3546 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
3547 */
3548 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3549 {
3550 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3551 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3552 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3553 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3554 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3555 }
3556
3557 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
3558 AssertRCReturn(rc, rc);
3559
3560 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3561 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
3562 }
3563 return rc;
3564}
3565
3566
3567/**
3568 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
3569 *
3570 * @returns VBox status code.
3571 * @param pVCpu The cross context virtual CPU structure.
3572 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3573 * out-of-sync. Make sure to update the required fields
3574 * before using them.
3575 *
3576 * @remarks No-long-jump zone!!!
3577 */
3578DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3579{
3580 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3581 AssertRCReturn(rc, rc);
3582 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3583 AssertRCReturn(rc, rc);
3584 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3585 AssertRCReturn(rc, rc);
3586 return rc;
3587}
3588
3589
3590/**
3591 * Loads the guest CR0 control register into the guest-state area in the VMCS.
3592 * CR0 is partially shared with the host and we have to consider the FPU bits.
3593 *
3594 * @returns VBox status code.
3595 * @param pVCpu The cross context virtual CPU structure.
3596 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3597 * out-of-sync. Make sure to update the required fields
3598 * before using them.
3599 *
3600 * @remarks No-long-jump zone!!!
3601 */
3602static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3603{
3604 /*
3605 * Guest CR0.
3606 * Guest FPU.
3607 */
3608 int rc = VINF_SUCCESS;
3609 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3610 {
3611 Assert(!(pMixedCtx->cr0 >> 32));
3612 uint32_t u32GuestCR0 = pMixedCtx->cr0;
3613 PVM pVM = pVCpu->CTX_SUFF(pVM);
3614
3615 /* The guest's view (read access) of its CR0 is unblemished. */
3616 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
3617 AssertRCReturn(rc, rc);
3618 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
3619
3620 /* Setup VT-x's view of the guest CR0. */
3621 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
3622 if (pVM->hm.s.fNestedPaging)
3623 {
3624 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3625 {
3626 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3627 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3628 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3629 }
3630 else
3631 {
3632 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3633 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3634 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3635 }
3636
3637 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3638 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3639 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3640
3641 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3642 AssertRCReturn(rc, rc);
3643 }
3644 else
3645 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3646
3647 /*
3648 * Guest FPU bits.
3649         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
3650         * CPUs to support VT-x; the VM-entry checks make no mention of relaxing this for unrestricted execution (UX).
3651 */
3652 u32GuestCR0 |= X86_CR0_NE;
3653 bool fInterceptNM = false;
3654 if (CPUMIsGuestFPUStateActive(pVCpu))
3655 {
3656 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3657            /* The guest should still get #NM exceptions when it expects them, so we should not clear TS & MP bits here.
3658 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3659 }
3660 else
3661 {
3662 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3663 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3664 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3665 }
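        /* I.e. while the guest FPU state is not loaded we force CR0.TS and CR0.MP so that the guest's first
           FPU or (F)WAIT instruction raises #NM, which we intercept to load the FPU state lazily. */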
3666
3667 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3668 bool fInterceptMF = false;
3669 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3670 fInterceptMF = true;
3671
3672 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3673 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3674 {
3675 Assert(PDMVmmDevHeapIsEnabled(pVM));
3676 Assert(pVM->hm.s.vmx.pRealModeTSS);
3677 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3678 fInterceptNM = true;
3679 fInterceptMF = true;
3680 }
3681 else
3682 {
3683 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
3684 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3685 }
3686 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3687
3688 if (fInterceptNM)
3689 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3690 else
3691 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3692
3693 if (fInterceptMF)
3694 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3695 else
3696 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3697
3698 /* Additional intercepts for debugging, define these yourself explicitly. */
3699#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3700 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3701 | RT_BIT(X86_XCPT_BP)
3702 | RT_BIT(X86_XCPT_DE)
3703 | RT_BIT(X86_XCPT_NM)
3704 | RT_BIT(X86_XCPT_TS)
3705 | RT_BIT(X86_XCPT_UD)
3706 | RT_BIT(X86_XCPT_NP)
3707 | RT_BIT(X86_XCPT_SS)
3708 | RT_BIT(X86_XCPT_GP)
3709 | RT_BIT(X86_XCPT_PF)
3710 | RT_BIT(X86_XCPT_MF)
3711 ;
3712#elif defined(HMVMX_ALWAYS_TRAP_PF)
3713 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3714#endif
3715
3716 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3717
3718 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3719 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3720 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
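        /* On the VMX-capable CPUs seen so far the fixed MSRs typically yield uSetCR0 = PE|NE|PG (0x80000021) and
           uZapCR0 = 0xffffffff, i.e. only PE, NE and PG are forced to 1 and nothing is forced to 0
           (illustrative values, not guaranteed by the spec). */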
3721 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3722 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3723 else
3724 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3725
3726 u32GuestCR0 |= uSetCR0;
3727 u32GuestCR0 &= uZapCR0;
3728 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3729
3730 /* Write VT-x's view of the guest CR0 into the VMCS. */
3731 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3732 AssertRCReturn(rc, rc);
3733 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
3734 uZapCR0));
3735
3736 /*
3737 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3738 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3739 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3740 */
3741 uint32_t u32CR0Mask = 0;
3742 u32CR0Mask = X86_CR0_PE
3743 | X86_CR0_NE
3744 | X86_CR0_WP
3745 | X86_CR0_PG
3746 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3747 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3748 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3749
3750 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3751 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3752 * and @bugref{6944}. */
3753#if 0
3754 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3755 u32CR0Mask &= ~X86_CR0_PE;
3756#endif
3757 if (pVM->hm.s.fNestedPaging)
3758 u32CR0Mask &= ~X86_CR0_WP;
3759
3760 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3761 if (fInterceptNM)
3762 {
3763 u32CR0Mask |= X86_CR0_TS
3764 | X86_CR0_MP;
3765 }
3766
3767 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3768 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3769 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3770 AssertRCReturn(rc, rc);
3771 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
3772
3773 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3774 }
3775 return rc;
3776}
3777
3778
3779/**
3780 * Loads the guest control registers (CR3, CR4) into the guest-state area
3781 * in the VMCS.
3782 *
3783 * @returns VBox status code.
3784 * @param pVCpu The cross context virtual CPU structure.
3785 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3786 * out-of-sync. Make sure to update the required fields
3787 * before using them.
3788 *
3789 * @remarks No-long-jump zone!!!
3790 */
3791static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3792{
3793 int rc = VINF_SUCCESS;
3794 PVM pVM = pVCpu->CTX_SUFF(pVM);
3795
3796 /*
3797 * Guest CR2.
3798 * It's always loaded in the assembler code. Nothing to do here.
3799 */
3800
3801 /*
3802 * Guest CR3.
3803 */
3804 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3805 {
3806 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3807 if (pVM->hm.s.fNestedPaging)
3808 {
3809 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3810
3811 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3812 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3813 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3814 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3815
3816 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3817 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3818 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
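            /* E.g. (illustrative): write-back memory type (6) in bits 2:0 and a page-walk-length field of 3
               (a 4-level walk) in bits 5:3 make the low byte of the EPTP 0x1e; bits 12 and up hold the 4K-aligned
               physical address of the EPT PML4 table. */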
3819
3820 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3821            AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page walk length - 1) must be 3. */
3822                      && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
3823 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3824 AssertMsg( !((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
3825 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
3826 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3827
3828 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3829 AssertRCReturn(rc, rc);
3830 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
3831
3832 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3833 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3834 {
3835 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3836 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3837 {
3838 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3839 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3840 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3841 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3842 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3843 }
3844
3845                /* With nested paging, the guest's view of its CR3 is unblemished when the guest is using paging, or when
3846                   we have unrestricted execution to handle the case where it is not using paging. */
3847 GCPhysGuestCR3 = pMixedCtx->cr3;
3848 }
3849 else
3850 {
3851 /*
3852 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3853 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3854 * EPT takes care of translating it to host-physical addresses.
3855 */
3856 RTGCPHYS GCPhys;
3857 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3858 Assert(PDMVmmDevHeapIsEnabled(pVM));
3859
3860 /* We obtain it here every time as the guest could have relocated this PCI region. */
3861 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3862 AssertRCReturn(rc, rc);
3863
3864 GCPhysGuestCR3 = GCPhys;
3865 }
3866
3867 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
3868 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3869 }
3870 else
3871 {
3872 /* Non-nested paging case, just use the hypervisor's CR3. */
3873 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3874
3875 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
3876 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3877 }
3878 AssertRCReturn(rc, rc);
3879
3880 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
3881 }
3882
3883 /*
3884 * Guest CR4.
3885     * ASSUMES this is done every time we get in from ring-3! (XCR0)
3886 */
3887 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
3888 {
3889 Assert(!(pMixedCtx->cr4 >> 32));
3890 uint32_t u32GuestCR4 = pMixedCtx->cr4;
3891
3892 /* The guest's view of its CR4 is unblemished. */
3893 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3894 AssertRCReturn(rc, rc);
3895 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
3896
3897 /* Setup VT-x's view of the guest CR4. */
3898 /*
3899 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3900 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3901 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3902 */
3903 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3904 {
3905 Assert(pVM->hm.s.vmx.pRealModeTSS);
3906 Assert(PDMVmmDevHeapIsEnabled(pVM));
3907 u32GuestCR4 &= ~X86_CR4_VME;
3908 }
3909
3910 if (pVM->hm.s.fNestedPaging)
3911 {
3912 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
3913 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3914 {
3915 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3916 u32GuestCR4 |= X86_CR4_PSE;
3917 /* Our identity mapping is a 32-bit page directory. */
3918 u32GuestCR4 &= ~X86_CR4_PAE;
3919 }
3920 /* else use guest CR4.*/
3921 }
3922 else
3923 {
3924 /*
3925             * Without nested paging, the shadow and guest paging modes differ: the shadow mode follows the host
3926             * paging mode, so we need to adjust VT-x's view of CR4 according to our shadow page tables.
3927 */
3928 switch (pVCpu->hm.s.enmShadowMode)
3929 {
3930 case PGMMODE_REAL: /* Real-mode. */
3931 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3932 case PGMMODE_32_BIT: /* 32-bit paging. */
3933 {
3934 u32GuestCR4 &= ~X86_CR4_PAE;
3935 break;
3936 }
3937
3938 case PGMMODE_PAE: /* PAE paging. */
3939 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3940 {
3941 u32GuestCR4 |= X86_CR4_PAE;
3942 break;
3943 }
3944
3945 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3946 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3947#ifdef VBOX_ENABLE_64_BITS_GUESTS
3948 break;
3949#endif
3950 default:
3951 AssertFailed();
3952 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3953 }
3954 }
3955
3956 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3957 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
3958 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
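        /* CR4_FIXED0 is typically just 0x2000 (VMXE), so in practice only X86_CR4_VMXE gets forced to 1 here
           (illustrative, not guaranteed by the spec). */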
3959 u32GuestCR4 |= uSetCR4;
3960 u32GuestCR4 &= uZapCR4;
3961
3962 /* Write VT-x's view of the guest CR4 into the VMCS. */
3963 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
3964 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3965 AssertRCReturn(rc, rc);
3966
3967 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
3968 uint32_t u32CR4Mask = X86_CR4_VME
3969 | X86_CR4_PAE
3970 | X86_CR4_PGE
3971 | X86_CR4_PSE
3972 | X86_CR4_VMXE;
3973 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
3974 u32CR4Mask |= X86_CR4_OSXSAVE;
3975 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
3976 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3977 AssertRCReturn(rc, rc);
3978
3979 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
3980 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
3981
3982 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
3983 }
3984 return rc;
3985}
3986
3987
3988/**
3989 * Loads the guest debug registers into the guest-state area in the VMCS.
3990 *
3991 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
3992 *
3993 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
3994 *
3995 * @returns VBox status code.
3996 * @param pVCpu The cross context virtual CPU structure.
3997 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3998 * out-of-sync. Make sure to update the required fields
3999 * before using them.
4000 *
4001 * @remarks No-long-jump zone!!!
4002 */
4003static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4004{
4005 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4006 return VINF_SUCCESS;
4007
4008#ifdef VBOX_STRICT
4009 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4010 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4011 {
4012 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4013 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4014 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4015 }
4016#endif
4017
4018 int rc;
4019 PVM pVM = pVCpu->CTX_SUFF(pVM);
4020 bool fSteppingDB = false;
4021 bool fInterceptMovDRx = false;
4022 if (pVCpu->hm.s.fSingleInstruction)
4023 {
4024 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4025 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4026 {
4027 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4028 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4029 AssertRCReturn(rc, rc);
4030 Assert(fSteppingDB == false);
4031 }
4032 else
4033 {
4034 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4035 pVCpu->hm.s.fClearTrapFlag = true;
4036 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4037 fSteppingDB = true;
4038 }
4039 }
4040
4041 if ( fSteppingDB
4042 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4043 {
4044 /*
4045 * Use the combined guest and host DRx values found in the hypervisor
4046 * register set because the debugger has breakpoints active or someone
4047 * is single stepping on the host side without a monitor trap flag.
4048 *
4049 * Note! DBGF expects a clean DR6 state before executing guest code.
4050 */
4051#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4052 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4053 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4054 {
4055 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4056 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4057 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4058 }
4059 else
4060#endif
4061 if (!CPUMIsHyperDebugStateActive(pVCpu))
4062 {
4063 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4064 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4065 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4066 }
4067
4068 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
4069 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4070 AssertRCReturn(rc, rc);
4071
4072 pVCpu->hm.s.fUsingHyperDR7 = true;
4073 fInterceptMovDRx = true;
4074 }
4075 else
4076 {
4077 /*
4078 * If the guest has enabled debug registers, we need to load them prior to
4079 * executing guest code so they'll trigger at the right time.
4080 */
4081 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
4082 {
4083#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4084 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4085 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4086 {
4087 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4088 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4089 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4090 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4091 }
4092 else
4093#endif
4094 if (!CPUMIsGuestDebugStateActive(pVCpu))
4095 {
4096 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4097 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4098 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4099 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4100 }
4101 Assert(!fInterceptMovDRx);
4102 }
4103 /*
4104         * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4105 * must intercept #DB in order to maintain a correct DR6 guest value, and
4106 * because we need to intercept it to prevent nested #DBs from hanging the
4107 * CPU, we end up always having to intercept it. See hmR0VmxInitXcptBitmap.
4108 */
4109#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4110 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4111 && !CPUMIsGuestDebugStateActive(pVCpu))
4112#else
4113 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4114#endif
4115 {
4116 fInterceptMovDRx = true;
4117 }
4118
4119 /* Update guest DR7. */
4120 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
4121 AssertRCReturn(rc, rc);
4122
4123 pVCpu->hm.s.fUsingHyperDR7 = false;
4124 }
4125
4126 /*
4127 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
4128 */
4129 if (fInterceptMovDRx)
4130 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4131 else
4132 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4133 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4134 AssertRCReturn(rc, rc);
4135
4136 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4137 return VINF_SUCCESS;
4138}
4139
4140
4141#ifdef VBOX_STRICT
4142/**
4143 * Strict function to validate segment registers.
4144 *
4145 * @remarks ASSUMES CR0 is up to date.
4146 */
4147static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4148{
4149 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4150 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
4151 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
4152 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4153 && ( !CPUMIsGuestInRealModeEx(pCtx)
4154 && !CPUMIsGuestInV86ModeEx(pCtx)))
4155 {
4156 /* Protected mode checks */
4157 /* CS */
4158 Assert(pCtx->cs.Attr.n.u1Present);
4159 Assert(!(pCtx->cs.Attr.u & 0xf00));
4160 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4161 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4162 || !(pCtx->cs.Attr.n.u1Granularity));
4163 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4164 || (pCtx->cs.Attr.n.u1Granularity));
4165 /* CS cannot be loaded with NULL in protected mode. */
4166 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
4167 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4168 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4169 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4170 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4171 else
4172            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4173 /* SS */
4174 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4175 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4176 if ( !(pCtx->cr0 & X86_CR0_PE)
4177 || pCtx->cs.Attr.n.u4Type == 3)
4178 {
4179 Assert(!pCtx->ss.Attr.n.u2Dpl);
4180 }
4181 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4182 {
4183 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4184 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4185 Assert(pCtx->ss.Attr.n.u1Present);
4186 Assert(!(pCtx->ss.Attr.u & 0xf00));
4187 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4188 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4189 || !(pCtx->ss.Attr.n.u1Granularity));
4190 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4191 || (pCtx->ss.Attr.n.u1Granularity));
4192 }
4193 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
4194 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4195 {
4196 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4197 Assert(pCtx->ds.Attr.n.u1Present);
4198 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4199 Assert(!(pCtx->ds.Attr.u & 0xf00));
4200 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4201 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4202 || !(pCtx->ds.Attr.n.u1Granularity));
4203 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4204 || (pCtx->ds.Attr.n.u1Granularity));
4205 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4206 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4207 }
4208 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4209 {
4210 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4211 Assert(pCtx->es.Attr.n.u1Present);
4212 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4213 Assert(!(pCtx->es.Attr.u & 0xf00));
4214 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4215 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4216 || !(pCtx->es.Attr.n.u1Granularity));
4217 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4218 || (pCtx->es.Attr.n.u1Granularity));
4219 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4220 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4221 }
4222 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4223 {
4224 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4225 Assert(pCtx->fs.Attr.n.u1Present);
4226 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4227 Assert(!(pCtx->fs.Attr.u & 0xf00));
4228 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4229 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4230 || !(pCtx->fs.Attr.n.u1Granularity));
4231 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4232 || (pCtx->fs.Attr.n.u1Granularity));
4233 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4234 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4235 }
4236 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4237 {
4238 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4239 Assert(pCtx->gs.Attr.n.u1Present);
4240 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4241 Assert(!(pCtx->gs.Attr.u & 0xf00));
4242 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4243 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4244 || !(pCtx->gs.Attr.n.u1Granularity));
4245 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4246 || (pCtx->gs.Attr.n.u1Granularity));
4247 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4248 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4249 }
4250 /* 64-bit capable CPUs. */
4251# if HC_ARCH_BITS == 64
4252 Assert(!(pCtx->cs.u64Base >> 32));
4253 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
4254 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
4255 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
4256# endif
4257 }
4258 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4259 || ( CPUMIsGuestInRealModeEx(pCtx)
4260 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4261 {
4262 /* Real and v86 mode checks. */
4263        /* hmR0VmxWriteSegmentReg() writes the modified value into the VMCS. We want to validate what we're actually feeding to VT-x. */
4264 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4265 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4266 {
4267 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4268 }
4269 else
4270 {
4271 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4272 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4273 }
4274
4275 /* CS */
4276 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4277 Assert(pCtx->cs.u32Limit == 0xffff);
4278 Assert(u32CSAttr == 0xf3);
4279 /* SS */
4280 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4281 Assert(pCtx->ss.u32Limit == 0xffff);
4282 Assert(u32SSAttr == 0xf3);
4283 /* DS */
4284 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4285 Assert(pCtx->ds.u32Limit == 0xffff);
4286 Assert(u32DSAttr == 0xf3);
4287 /* ES */
4288 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4289 Assert(pCtx->es.u32Limit == 0xffff);
4290 Assert(u32ESAttr == 0xf3);
4291 /* FS */
4292 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4293 Assert(pCtx->fs.u32Limit == 0xffff);
4294 Assert(u32FSAttr == 0xf3);
4295 /* GS */
4296 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4297 Assert(pCtx->gs.u32Limit == 0xffff);
4298 Assert(u32GSAttr == 0xf3);
4299 /* 64-bit capable CPUs. */
4300# if HC_ARCH_BITS == 64
4301 Assert(!(pCtx->cs.u64Base >> 32));
4302 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
4303 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
4304 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
4305# endif
4306 }
4307}
4308#endif /* VBOX_STRICT */
4309
4310
4311/**
4312 * Writes a guest segment register into the guest-state area in the VMCS.
4313 *
4314 * @returns VBox status code.
4315 * @param pVCpu The cross context virtual CPU structure.
4316 * @param idxSel Index of the selector in the VMCS.
4317 * @param idxLimit Index of the segment limit in the VMCS.
4318 * @param idxBase Index of the segment base in the VMCS.
4319 * @param idxAccess Index of the access rights of the segment in the VMCS.
4320 * @param pSelReg Pointer to the segment selector.
4321 *
4322 * @remarks No-long-jump zone!!!
4323 */
4324static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4325 uint32_t idxAccess, PCPUMSELREG pSelReg)
4326{
4327 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4328 AssertRCReturn(rc, rc);
4329 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4330 AssertRCReturn(rc, rc);
4331 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4332 AssertRCReturn(rc, rc);
4333
4334 uint32_t u32Access = pSelReg->Attr.u;
4335 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4336 {
4337        /* VT-x requires our real-on-v86 mode hack to override the segment access-right bits. */
4338 u32Access = 0xf3;
4339 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4340 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4341 }
4342 else
4343 {
4344 /*
4345         * We use the segment attributes to tell a genuinely null selector apart from a selector that was merely loaded
4346         * with 0 in real-mode. A selector loaded in real-mode with the value 0 is valid and usable in protected-mode and
4347         * we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL selectors loaded
4348         * in protected-mode have their attributes as 0.
4349 */
4350 if (!u32Access)
4351 u32Access = X86DESCATTR_UNUSABLE;
4352 }
4353
4354 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4355 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4356 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
4357
4358 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4359 AssertRCReturn(rc, rc);
4360 return rc;
4361}
4362
4363
4364/**
4365 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4366 * into the guest-state area in the VMCS.
4367 *
4368 * @returns VBox status code.
4369 * @param pVCpu The cross context virtual CPU structure.
4370 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4371 * out-of-sync. Make sure to update the required fields
4372 * before using them.
4373 *
4374 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
4375 * @remarks No-long-jump zone!!!
4376 */
4377static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4378{
4379 int rc = VERR_INTERNAL_ERROR_5;
4380 PVM pVM = pVCpu->CTX_SUFF(pVM);
4381
4382 /*
4383 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4384 */
4385 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4386 {
4387 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
4388 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4389 {
4390 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4391 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4392 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4393 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4394 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4395 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4396 }
4397
4398#ifdef VBOX_WITH_REM
4399 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4400 {
4401 Assert(pVM->hm.s.vmx.pRealModeTSS);
4402 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4403 if ( pVCpu->hm.s.vmx.fWasInRealMode
4404 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4405 {
4406 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4407 in real-mode (e.g. OpenBSD 4.0). */
4408 REMFlushTBs(pVM);
4409 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
4410 pVCpu->hm.s.vmx.fWasInRealMode = false;
4411 }
4412 }
4413#endif
4414 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4415 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
4416 AssertRCReturn(rc, rc);
4417 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4418 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
4419 AssertRCReturn(rc, rc);
4420 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4421 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
4422 AssertRCReturn(rc, rc);
4423 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4424 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
4425 AssertRCReturn(rc, rc);
4426 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4427 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
4428 AssertRCReturn(rc, rc);
4429 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4430 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
4431 AssertRCReturn(rc, rc);
4432
4433#ifdef VBOX_STRICT
4434 /* Validate. */
4435 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4436#endif
4437
4438 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4439 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
4440 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4441 }
4442
4443 /*
4444 * Guest TR.
4445 */
4446 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4447 {
4448 /*
4449 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
4450 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
4451 * See hmR3InitFinalizeR0() to see how pRealModeTSS is set up.
4452 */
4453 uint16_t u16Sel = 0;
4454 uint32_t u32Limit = 0;
4455 uint64_t u64Base = 0;
4456 uint32_t u32AccessRights = 0;
4457
4458 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4459 {
4460 u16Sel = pMixedCtx->tr.Sel;
4461 u32Limit = pMixedCtx->tr.u32Limit;
4462 u64Base = pMixedCtx->tr.u64Base;
4463 u32AccessRights = pMixedCtx->tr.Attr.u;
4464 }
4465 else
4466 {
4467 Assert(pVM->hm.s.vmx.pRealModeTSS);
4468 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4469
4470 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4471 RTGCPHYS GCPhys;
4472 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4473 AssertRCReturn(rc, rc);
4474
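 /* Synthesize access rights for the virtual TSS: a present, busy 32-bit TSS descriptor. */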
4475 X86DESCATTR DescAttr;
4476 DescAttr.u = 0;
4477 DescAttr.n.u1Present = 1;
4478 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4479
4480 u16Sel = 0;
4481 u32Limit = HM_VTX_TSS_SIZE;
4482 u64Base = GCPhys; /* in real-mode phys = virt. */
4483 u32AccessRights = DescAttr.u;
4484 }
4485
4486 /* Validate. */
4487 Assert(!(u16Sel & RT_BIT(2)));
4488 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4489 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4490 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4491 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4492 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4493 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4494 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4495 Assert( (u32Limit & 0xfff) == 0xfff
4496 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4497 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4498 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4499
4500 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
4501 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
4502 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
4503 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
4504
4505 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4506 Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
4507 }
4508
4509 /*
4510 * Guest GDTR.
4511 */
4512 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4513 {
4514 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
4515 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
4516
4517 /* Validate. */
4518 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4519
4520 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4521 Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
4522 }
4523
4524 /*
4525 * Guest LDTR.
4526 */
4527 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4528 {
4529 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
4530 uint32_t u32Access = 0;
4531 if (!pMixedCtx->ldtr.Attr.u)
4532 u32Access = X86DESCATTR_UNUSABLE;
4533 else
4534 u32Access = pMixedCtx->ldtr.Attr.u;
4535
4536 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
4537 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
4538 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
4539 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
4540
4541 /* Validate. */
4542 if (!(u32Access & X86DESCATTR_UNUSABLE))
4543 {
4544 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4545 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4546 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4547 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4548 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4549 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4550 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4551 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4552 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4553 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4554 }
4555
4556 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4557 Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
4558 }
4559
4560 /*
4561 * Guest IDTR.
4562 */
4563 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4564 {
4565 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
4566 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
4567
4568 /* Validate. */
4569 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4570
4571 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4572 Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
4573 }
4574
4575 return VINF_SUCCESS;
4576}
4577
4578
4579/**
4580 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4581 * areas.
4582 *
4583 * These MSRs will automatically be loaded to the host CPU on every successful
4584 * VM-entry and stored from the host CPU on every successful VM-exit. This also
4585 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
4586 * -not- updated here for performance reasons. See hmR0VmxSaveHostMsrs().
4587 *
4588 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
4589 *
4590 * @returns VBox status code.
4591 * @param pVCpu The cross context virtual CPU structure.
4592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4593 * out-of-sync. Make sure to update the required fields
4594 * before using them.
4595 *
4596 * @remarks No-long-jump zone!!!
4597 */
4598static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4599{
4600 AssertPtr(pVCpu);
4601 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4602
4603 /*
4604 * MSRs for which we use the auto-load/store MSR area in the VMCS.
4605 */
4606 PVM pVM = pVCpu->CTX_SUFF(pVM);
4607 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4608 {
4609 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
4610#if HC_ARCH_BITS == 32
4611 if (pVM->hm.s.fAllow64BitGuests)
4612 {
4613 int rc = VINF_SUCCESS;
4614 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL);
4615 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL);
4616 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false, NULL);
4617 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
4618 AssertRCReturn(rc, rc);
4619# ifdef LOG_ENABLED
4620 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4621 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4622 {
4623 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
4624 pMsr->u64Value));
4625 }
4626# endif
4627 }
4628#endif
4629 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4630 }
4631
4632 /*
4633 * Guest Sysenter MSRs.
4634 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4635 * VM-exits on WRMSRs for these MSRs.
4636 */
4637 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4638 {
4639 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4640 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4641 }
4642
4643 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4644 {
4645 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4646 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4647 }
4648
4649 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4650 {
4651 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4652 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4653 }
4654
4655 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4656 {
4657 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4658 {
4659 /*
4660 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4661 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4662 */
4663 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4664 {
4665 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4666 AssertRCReturn(rc, rc);
4667 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
4668 }
4669 else
4670 {
4671 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */,
4672 NULL /* pfAddedAndUpdated */);
4673 AssertRCReturn(rc, rc);
4674
4675 /* We need to intercept reads too, see @bugref{7386#c16}. */
4676 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
4677 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4678 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
4679 pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
4680 }
4681 }
4682 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4683 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4684 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4685 }
4686
4687 return VINF_SUCCESS;
4688}
4689
4690
4691/**
4692 * Loads the guest activity state into the guest-state area in the VMCS.
4693 *
4694 * @returns VBox status code.
4695 * @param pVCpu The cross context virtual CPU structure.
4696 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4697 * out-of-sync. Make sure to update the required fields
4698 * before using them.
4699 *
4700 * @remarks No-long-jump zone!!!
4701 */
4702static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4703{
4704 NOREF(pMixedCtx);
4705 /** @todo See if we can make use of other states, e.g.
4706 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4707 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4708 {
4709 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4710 AssertRCReturn(rc, rc);
4711
4712 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4713 }
4714 return VINF_SUCCESS;
4715}
4716
4717
4718/**
4719 * Sets up the appropriate function to run guest code.
4720 *
4721 * @returns VBox status code.
4722 * @param pVCpu The cross context virtual CPU structure.
4723 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4724 * out-of-sync. Make sure to update the required fields
4725 * before using them.
4726 *
4727 * @remarks No-long-jump zone!!!
4728 */
4729static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4730{
4731 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4732 {
4733#ifndef VBOX_ENABLE_64_BITS_GUESTS
4734 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4735#endif
4736 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4737#if HC_ARCH_BITS == 32
4738 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4739 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4740 {
4741 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4742 {
4743 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4744 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4745 | HM_CHANGED_VMX_ENTRY_CTLS
4746 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4747 }
4748 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4749 }
4750#else
4751 /* 64-bit host. */
4752 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4753#endif
4754 }
4755 else
4756 {
4757 /* Guest is not in long mode, use the 32-bit handler. */
4758#if HC_ARCH_BITS == 32
4759 if ( pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32
4760 && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4761 {
4762 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4763 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4764 | HM_CHANGED_VMX_ENTRY_CTLS
4765 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4766 }
4767#endif
4768 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4769 }
4770 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4771 return VINF_SUCCESS;
4772}
4773
4774
4775/**
4776 * Wrapper for running the guest code in VT-x.
4777 *
4778 * @returns VBox strict status code.
4779 * @param pVM The cross context VM structure.
4780 * @param pVCpu The cross context virtual CPU structure.
4781 * @param pCtx Pointer to the guest-CPU context.
4782 *
4783 * @remarks No-long-jump zone!!!
4784 */
4785DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4786{
4787 /*
4788 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4789 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
4790 * Refer to the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
4791 */
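 /* Use VMRESUME if this VMCS has already been launched on this CPU, otherwise this is the first run and we VMLAUNCH. */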
4792 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
4793 /** @todo Add stats for resume vs launch. */
4794#ifdef VBOX_WITH_KERNEL_USING_XMM
4795 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4796#else
4797 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4798#endif
4799}
4800
4801
4802/**
4803 * Reports world-switch error and dumps some useful debug info.
4804 *
4805 * @param pVM The cross context VM structure.
4806 * @param pVCpu The cross context virtual CPU structure.
4807 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4808 * @param pCtx Pointer to the guest-CPU context.
4809 * @param pVmxTransient Pointer to the VMX transient structure (only
4810 * exitReason updated).
4811 */
4812static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4813{
4814 Assert(pVM);
4815 Assert(pVCpu);
4816 Assert(pCtx);
4817 Assert(pVmxTransient);
4818 HMVMX_ASSERT_PREEMPT_SAFE();
4819
4820 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4821 switch (rcVMRun)
4822 {
4823 case VERR_VMX_INVALID_VMXON_PTR:
4824 AssertFailed();
4825 break;
4826 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4827 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4828 {
4829 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4830 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4831 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4832 AssertRC(rc);
4833
4834 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4835 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4836 Cannot do it here as we may have been preempted for a long time. */
4837
4838#ifdef VBOX_STRICT
4839 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4840 pVmxTransient->uExitReason));
4841 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4842 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4843 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4844 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4845 else
4846 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4847 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
4848 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
4849
4850 /* VMX control bits. */
4851 uint32_t u32Val;
4852 uint64_t u64Val;
4853 RTHCUINTREG uHCReg;
4854 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4855 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4856 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4857 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4858 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4859 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4860 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4861 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4862 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4863 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4864 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4865 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4866 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4867 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4868 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4869 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4870 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4871 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4872 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4873 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4874 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4875 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4876 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4877 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4878 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4879 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4880 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4881 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4882 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4883 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4884 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4885 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4886 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4887 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4888 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4889 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4890 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4891 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4892 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4893 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4894 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4895 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4896
4897 /* Guest bits. */
4898 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4899 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4900 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4901 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4902 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4903 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4904 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4905 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4906
4907 /* Host bits. */
4908 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4909 Log4(("Host CR0 %#RHr\n", uHCReg));
4910 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4911 Log4(("Host CR3 %#RHr\n", uHCReg));
4912 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4913 Log4(("Host CR4 %#RHr\n", uHCReg));
4914
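 /* Dump the host segment registers along with their descriptors from the host GDT. */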
4915 RTGDTR HostGdtr;
4916 PCX86DESCHC pDesc;
4917 ASMGetGDTR(&HostGdtr);
4918 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4919 Log4(("Host CS %#08x\n", u32Val));
4920 if (u32Val < HostGdtr.cbGdt)
4921 {
4922 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4923 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
4924 }
4925
4926 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
4927 Log4(("Host DS %#08x\n", u32Val));
4928 if (u32Val < HostGdtr.cbGdt)
4929 {
4930 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4931 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
4932 }
4933
4934 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
4935 Log4(("Host ES %#08x\n", u32Val));
4936 if (u32Val < HostGdtr.cbGdt)
4937 {
4938 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4939 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
4940 }
4941
4942 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
4943 Log4(("Host FS %#08x\n", u32Val));
4944 if (u32Val < HostGdtr.cbGdt)
4945 {
4946 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4947 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
4948 }
4949
4950 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
4951 Log4(("Host GS %#08x\n", u32Val));
4952 if (u32Val < HostGdtr.cbGdt)
4953 {
4954 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4955 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
4956 }
4957
4958 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
4959 Log4(("Host SS %#08x\n", u32Val));
4960 if (u32Val < HostGdtr.cbGdt)
4961 {
4962 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4963 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
4964 }
4965
4966 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
4967 Log4(("Host TR %#08x\n", u32Val));
4968 if (u32Val < HostGdtr.cbGdt)
4969 {
4970 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4971 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
4972 }
4973
4974 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4975 Log4(("Host TR Base %#RHv\n", uHCReg));
4976 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
4977 Log4(("Host GDTR Base %#RHv\n", uHCReg));
4978 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
4979 Log4(("Host IDTR Base %#RHv\n", uHCReg));
4980 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
4981 Log4(("Host SYSENTER CS %#08x\n", u32Val));
4982 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
4983 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
4984 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
4985 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
4986 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
4987 Log4(("Host RSP %#RHv\n", uHCReg));
4988 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
4989 Log4(("Host RIP %#RHv\n", uHCReg));
4990# if HC_ARCH_BITS == 64
4991 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4992 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4993 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4994 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4995 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4996 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4997# endif
4998#endif /* VBOX_STRICT */
4999 break;
5000 }
5001
5002 default:
5003 /* Impossible */
5004 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5005 break;
5006 }
5007 NOREF(pVM); NOREF(pCtx);
5008}
5009
5010
5011#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
5012#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5013# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5014#endif
5015#ifdef VBOX_STRICT
5016static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5017{
5018 switch (idxField)
5019 {
5020 case VMX_VMCS_GUEST_RIP:
5021 case VMX_VMCS_GUEST_RSP:
5022 case VMX_VMCS_GUEST_SYSENTER_EIP:
5023 case VMX_VMCS_GUEST_SYSENTER_ESP:
5024 case VMX_VMCS_GUEST_GDTR_BASE:
5025 case VMX_VMCS_GUEST_IDTR_BASE:
5026 case VMX_VMCS_GUEST_CS_BASE:
5027 case VMX_VMCS_GUEST_DS_BASE:
5028 case VMX_VMCS_GUEST_ES_BASE:
5029 case VMX_VMCS_GUEST_FS_BASE:
5030 case VMX_VMCS_GUEST_GS_BASE:
5031 case VMX_VMCS_GUEST_SS_BASE:
5032 case VMX_VMCS_GUEST_LDTR_BASE:
5033 case VMX_VMCS_GUEST_TR_BASE:
5034 case VMX_VMCS_GUEST_CR3:
5035 return true;
5036 }
5037 return false;
5038}
5039
5040static bool hmR0VmxIsValidReadField(uint32_t idxField)
5041{
5042 switch (idxField)
5043 {
5044 /* Read-only fields. */
5045 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5046 return true;
5047 }
5048 /* Remaining readable fields should also be writable. */
5049 return hmR0VmxIsValidWriteField(idxField);
5050}
5051#endif /* VBOX_STRICT */
5052
5053
5054/**
5055 * Executes the specified handler in 64-bit mode.
5056 *
5057 * @returns VBox status code.
5058 * @param pVM The cross context VM structure.
5059 * @param pVCpu The cross context virtual CPU structure.
5060 * @param pCtx Pointer to the guest CPU context.
5061 * @param enmOp The operation to perform.
5062 * @param cParams Number of parameters.
5063 * @param paParam Array of 32-bit parameters.
5064 */
5065VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
5066 uint32_t cParams, uint32_t *paParam)
5067{
5068 NOREF(pCtx);
5069
5070 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5071 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5072 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5073 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5074
5075#ifdef VBOX_STRICT
5076 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5077 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5078
5079 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5080 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5081#endif
5082
5083 /* Disable interrupts. */
5084 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
5085
5086#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5087 RTCPUID idHostCpu = RTMpCpuId();
5088 CPUMR0SetLApic(pVCpu, idHostCpu);
5089#endif
5090
5091 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
5092 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5093
5094 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5095 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5096
5097 /* Leave VMX Root Mode. */
5098 VMXDisable();
5099
5100 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5101
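 /* Set up the hypervisor context for the 64-bit handler: stack pointer, entry point (the requested operation) and its parameters, pushed in reverse order. */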
5102 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5103 CPUMSetHyperEIP(pVCpu, enmOp);
5104 for (int i = (int)cParams - 1; i >= 0; i--)
5105 CPUMPushHyper(pVCpu, paParam[i]);
5106
5107 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5108
5109 /* Call the switcher. */
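 /* The second argument is the offset from the VM's CPUM data to this VCPU's CPUM data within the VM structure. */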
5110 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5111 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5112
5113 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5114 /* Make sure the VMX instructions don't cause #UD faults. */
5115 SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
5116
5117 /* Re-enter VMX Root Mode */
5118 int rc2 = VMXEnable(HCPhysCpuPage);
5119 if (RT_FAILURE(rc2))
5120 {
5121 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5122 ASMSetFlags(fOldEFlags);
5123 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
5124 return rc2;
5125 }
5126
5127 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5128 AssertRC(rc2);
5129 Assert(!(ASMGetFlags() & X86_EFL_IF));
5130 ASMSetFlags(fOldEFlags);
5131 return rc;
5132}
5133
5134
5135/**
5136 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5137 * supporting 64-bit guests.
5138 *
5139 * @returns VBox status code.
5140 * @param fResume Whether to VMLAUNCH or VMRESUME.
5141 * @param pCtx Pointer to the guest-CPU context.
5142 * @param pCache Pointer to the VMCS cache.
5143 * @param pVM The cross context VM structure.
5144 * @param pVCpu The cross context virtual CPU structure.
5145 */
5146DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5147{
5148 NOREF(fResume);
5149
5150 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
5151 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5152
5153#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5154 pCache->uPos = 1;
5155 pCache->interPD = PGMGetInterPaeCR3(pVM);
5156 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5157#endif
5158
5159#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5160 pCache->TestIn.HCPhysCpuPage = 0;
5161 pCache->TestIn.HCPhysVmcs = 0;
5162 pCache->TestIn.pCache = 0;
5163 pCache->TestOut.HCPhysVmcs = 0;
5164 pCache->TestOut.pCache = 0;
5165 pCache->TestOut.pCtx = 0;
5166 pCache->TestOut.eflags = 0;
5167#else
5168 NOREF(pCache);
5169#endif
5170
5171 uint32_t aParam[10];
5172 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5173 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5174 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5175 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
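 /* Params 3, 4 and 5: raw-mode context addresses of the VMCS cache, the VM and the VMCPU; the accompanying high dwords are zero. */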
5176 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5177 aParam[5] = 0;
5178 aParam[6] = VM_RC_ADDR(pVM, pVM);
5179 aParam[7] = 0;
5180 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5181 aParam[9] = 0;
5182
5183#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5184 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5185 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5186#endif
5187 int rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5188
5189#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5190 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5191 Assert(pCtx->dr[4] == 10);
5192 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5193#endif
5194
5195#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5196 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5197 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5198 pVCpu->hm.s.vmx.HCPhysVmcs));
5199 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5200 pCache->TestOut.HCPhysVmcs));
5201 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5202 pCache->TestOut.pCache));
5203 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5204 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5205 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5206 pCache->TestOut.pCtx));
5207 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5208#endif
5209 return rc;
5210}
5211
5212
5213/**
5214 * Initializes the VMCS read cache.
5215 *
5216 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
5217 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
5218 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
5219 * (those that have a 32-bit FULL & HIGH part).
5220 *
5221 * @returns VBox status code.
5222 * @param pVM The cross context VM structure.
5223 * @param pVCpu The cross context virtual CPU structure.
5224 */
5225static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5226{
5227#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5228{ \
5229 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5230 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5231 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5232 ++cReadFields; \
5233}
5234
5235 AssertPtr(pVM);
5236 AssertPtr(pVCpu);
5237 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5238 uint32_t cReadFields = 0;
5239
5240 /*
5241 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5242 * and serve to indicate exceptions to the rules.
5243 */
5244
5245 /* Guest-natural selector base fields. */
5246#if 0
5247 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5248 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5249 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5250#endif
5251 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5252 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5253 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5254 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5255 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5256 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5257 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5258 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5259 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5260 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5261 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5262 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5263#if 0
5264 /* Unused natural width guest-state fields. */
5265 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5266 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5267#endif
5268 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5269 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5270
5271 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
5272#if 0
5273 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5274 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5275 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5276 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5277 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5278 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5279 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5280 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5281 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5282#endif
5283
5284 /* Natural width guest-state fields. */
5285 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5286#if 0
5287 /* Currently unused field. */
5288 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5289#endif
5290
5291 if (pVM->hm.s.fNestedPaging)
5292 {
5293 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5294 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5295 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5296 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5297 }
5298 else
5299 {
5300 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5301 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5302 }
5303
5304#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5305 return VINF_SUCCESS;
5306}
5307
5308
5309/**
5310 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5311 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5312 * Darwin, running 64-bit guests).
5313 *
5314 * @returns VBox status code.
5315 * @param pVCpu The cross context virtual CPU structure.
5316 * @param idxField The VMCS field encoding.
5317 * @param u64Val 16, 32 or 64-bit value.
5318 */
5319VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5320{
5321 int rc;
5322 switch (idxField)
5323 {
5324 /*
5325 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5326 */
5327 /* 64-bit Control fields. */
5328 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5329 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5330 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5331 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5332 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5333 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5334 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5335 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5336 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5337 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5338 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5339 case VMX_VMCS64_CTRL_EPTP_FULL:
5340 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5341 /* 64-bit Guest-state fields. */
5342 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5343 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5344 case VMX_VMCS64_GUEST_PAT_FULL:
5345 case VMX_VMCS64_GUEST_EFER_FULL:
5346 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5347 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5348 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5349 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5350 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5351 /* 64-bit Host-state fields. */
5352 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
5353 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
5354 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5355 {
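 /* Write the low 32 bits to the FULL field and the high 32 bits to the adjacent HIGH field (encoding + 1). */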
5356 rc = VMXWriteVmcs32(idxField, u64Val);
5357 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
5358 break;
5359 }
5360
5361 /*
5362 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5363 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
5364 */
5365 /* Natural-width Guest-state fields. */
5366 case VMX_VMCS_GUEST_CR3:
5367 case VMX_VMCS_GUEST_ES_BASE:
5368 case VMX_VMCS_GUEST_CS_BASE:
5369 case VMX_VMCS_GUEST_SS_BASE:
5370 case VMX_VMCS_GUEST_DS_BASE:
5371 case VMX_VMCS_GUEST_FS_BASE:
5372 case VMX_VMCS_GUEST_GS_BASE:
5373 case VMX_VMCS_GUEST_LDTR_BASE:
5374 case VMX_VMCS_GUEST_TR_BASE:
5375 case VMX_VMCS_GUEST_GDTR_BASE:
5376 case VMX_VMCS_GUEST_IDTR_BASE:
5377 case VMX_VMCS_GUEST_RSP:
5378 case VMX_VMCS_GUEST_RIP:
5379 case VMX_VMCS_GUEST_SYSENTER_ESP:
5380 case VMX_VMCS_GUEST_SYSENTER_EIP:
5381 {
5382 if (!(u64Val >> 32))
5383 {
5384 /* If this field is 64-bit, VT-x will zero out the top bits. */
5385 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5386 }
5387 else
5388 {
5389 /* Assert that only the 32->64 switcher case should ever come here. */
5390 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5391 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5392 }
5393 break;
5394 }
5395
5396 default:
5397 {
5398 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5399 rc = VERR_INVALID_PARAMETER;
5400 break;
5401 }
5402 }
5403 AssertRCReturn(rc, rc);
5404 return rc;
5405}
5406
5407
5408/**
5409 * Queues up a VMWRITE by using the VMCS write cache.
5410 * This is only used on 32-bit hosts (except Darwin) for 64-bit guests.
5411 *
5412 * @param pVCpu The cross context virtual CPU structure.
5413 * @param idxField The VMCS field encoding.
5414 * @param u64Val 16, 32 or 64-bit value.
5415 */
5416VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5417{
5418 AssertPtr(pVCpu);
5419 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5420
5421 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5422 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5423
5424 /* Make sure there are no duplicates. */
5425 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5426 {
5427 if (pCache->Write.aField[i] == idxField)
5428 {
5429 pCache->Write.aFieldVal[i] = u64Val;
5430 return VINF_SUCCESS;
5431 }
5432 }
5433
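 /* No existing entry for this field; append it to the write cache. */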
5434 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5435 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5436 pCache->Write.cValidEntries++;
5437 return VINF_SUCCESS;
5438}
5439#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
5440
5441
5442/**
5443 * Sets up the usage of TSC-offsetting and updates the VMCS.
5444 *
5445 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
5446 * VMX preemption timer.
5447 *
5448 * @returns VBox status code.
5449 * @param pVM The cross context VM structure.
5450 * @param pVCpu The cross context virtual CPU structure.
5451 *
5452 * @remarks No-long-jump zone!!!
5453 */
5454static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
5455{
5456 int rc;
5457 bool fOffsettedTsc;
5458 bool fParavirtTsc;
5459 if (pVM->hm.s.vmx.fUsePreemptTimer)
5460 {
5461 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
5462 &fOffsettedTsc, &fParavirtTsc);
5463
5464 /* Make sure the returned values have sane upper and lower boundaries. */
5465 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5466 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5467 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
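 /* Convert TSC ticks to VMX-preemption timer ticks; the timer counts down at the TSC rate shifted right by cPreemptTimerShift. */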
5468 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5469
5470 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5471 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
5472 }
5473 else
5474 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5475
5476 /** @todo later optimize this to be done elsewhere and not before every
5477 * VM-entry. */
5478 if (fParavirtTsc)
5479 {
5480 /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
5481 information before every VM-entry, hence we disable this for performance's sake. */
5482#if 0
5483 rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5484 AssertRC(rc);
5485#endif
5486 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5487 }
5488
5489 if (fOffsettedTsc)
5490 {
5491 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5492 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5493
5494 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5495 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5496 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5497 }
5498 else
5499 {
5500 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5501 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5502 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5503 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5504 }
5505}
5506
5507
5508/**
5509 * Determines if an exception is a contributory exception.
5510 *
5511 * Contributory exceptions are ones which can cause double-faults unless the
5512 * original exception was a benign exception. Page-fault is intentionally not
5513 * included here as it's a conditional contributory exception.
5514 *
5515 * @returns true if the exception is contributory, false otherwise.
5516 * @param uVector The exception vector.
5517 */
5518DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
5519{
5520 switch (uVector)
5521 {
5522 case X86_XCPT_GP:
5523 case X86_XCPT_SS:
5524 case X86_XCPT_NP:
5525 case X86_XCPT_TS:
5526 case X86_XCPT_DE:
5527 return true;
5528 default:
5529 break;
5530 }
5531 return false;
5532}
5533
5534
5535/**
5536 * Sets an event as a pending event to be injected into the guest.
5537 *
5538 * @param pVCpu The cross context virtual CPU structure.
5539 * @param u32IntInfo The VM-entry interruption-information field.
5540 * @param cbInstr The VM-entry instruction length in bytes (for software
5541 * interrupts, exceptions and privileged software
5542 * exceptions).
5543 * @param u32ErrCode The VM-entry exception error code.
5544 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5545 * page-fault.
5546 *
5547 * @remarks Statistics counter assumes this is a guest event being injected or
5548 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5549 * always incremented.
5550 */
5551DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5552 RTGCUINTPTR GCPtrFaultAddress)
5553{
5554 Assert(!pVCpu->hm.s.Event.fPending);
5555 pVCpu->hm.s.Event.fPending = true;
5556 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5557 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5558 pVCpu->hm.s.Event.cbInstr = cbInstr;
5559 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5560
5561 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5562}
5563
5564
5565/**
5566 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
5567 *
5568 * @param pVCpu The cross context virtual CPU structure.
5569 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5570 * out-of-sync. Make sure to update the required fields
5571 * before using them.
5572 */
5573DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5574{
5575 NOREF(pMixedCtx);
5576 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5577 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5578 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5579 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5580}
5581
5582
5583/**
5584 * Handles a condition that occurred while delivering an event through the guest
5585 * IDT.
5586 *
5587 * @returns VBox status code (informational error codes included).
5588 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5589 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought to
5590 * continue execution of the guest, which will deliver the \#DF.
5591 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5592 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5593 *
5594 * @param pVCpu The cross context virtual CPU structure.
5595 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5596 * out-of-sync. Make sure to update the required fields
5597 * before using them.
5598 * @param pVmxTransient Pointer to the VMX transient structure.
5599 *
5600 * @remarks No-long-jump zone!!!
5601 */
5602static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5603{
5604 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5605
5606 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5607 AssertRCReturn(rc, rc);
5608 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5609 AssertRCReturn(rc, rc);
5610
5611 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5612 {
5613 uint32_t uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5614 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5615
5616 typedef enum
5617 {
5618 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5619 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5620 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5621 VMXREFLECTXCPT_HANG, /* Indicate bad VM trying to deadlock the CPU. */
5622 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
5623 } VMXREFLECTXCPT;
5624
5625 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
5626 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
5627 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5628 {
5629 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
5630 {
5631 enmReflect = VMXREFLECTXCPT_XCPT;
5632#ifdef VBOX_STRICT
5633 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
5634 && uExitVector == X86_XCPT_PF)
5635 {
5636 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5637 }
5638#endif
5639 if ( uExitVector == X86_XCPT_PF
5640 && uIdtVector == X86_XCPT_PF)
5641 {
5642 pVmxTransient->fVectoringDoublePF = true;
5643 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5644 }
5645 else if ( uExitVector == X86_XCPT_AC
5646 && uIdtVector == X86_XCPT_AC)
5647 {
5648 enmReflect = VMXREFLECTXCPT_HANG;
5649 Log4(("IDT: Nested #AC - Bad guest\n"));
5650 }
5651 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5652 && hmR0VmxIsContributoryXcpt(uExitVector)
5653 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
5654 || uIdtVector == X86_XCPT_PF))
5655 {
5656 enmReflect = VMXREFLECTXCPT_DF;
5657 }
5658 else if (uIdtVector == X86_XCPT_DF)
5659 enmReflect = VMXREFLECTXCPT_TF;
5660 }
5661 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5662 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5663 {
5664 /*
5665 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
5666 * privileged software exceptions (#DB from ICEBP) as they re-occur when the instruction is restarted.
5667 */
5668 enmReflect = VMXREFLECTXCPT_XCPT;
5669
5670 if (uExitVector == X86_XCPT_PF)
5671 {
5672 pVmxTransient->fVectoringPF = true;
5673 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5674 }
5675 }
5676 }
5677 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5678 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5679 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5680 {
5681 /*
5682 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5683 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,
5684 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.
5685 */
5686 enmReflect = VMXREFLECTXCPT_XCPT;
5687 }
5688
5689 /*
5690 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred
5691 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before
5692 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.
5693 *
5694 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5695 */
5696 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5697 && enmReflect == VMXREFLECTXCPT_XCPT
5698 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
5699 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5700 {
5701 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5702 }
5703
5704 switch (enmReflect)
5705 {
5706 case VMXREFLECTXCPT_XCPT:
5707 {
5708 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5709 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5710 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5711
5712 uint32_t u32ErrCode = 0;
5713 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5714 {
5715 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5716 AssertRCReturn(rc, rc);
5717 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5718 }
5719
5720 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5721 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5722 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5723 rc = VINF_SUCCESS;
5724 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5725 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5726
5727 break;
5728 }
5729
5730 case VMXREFLECTXCPT_DF:
5731 {
5732 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5733 rc = VINF_HM_DOUBLE_FAULT;
5734 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5735 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5736
5737 break;
5738 }
5739
5740 case VMXREFLECTXCPT_TF:
5741 {
5742 rc = VINF_EM_RESET;
5743 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5744 uExitVector));
5745 break;
5746 }
5747
5748 case VMXREFLECTXCPT_HANG:
5749 {
5750 rc = VERR_EM_GUEST_CPU_HANG;
5751 break;
5752 }
5753
5754 default:
5755 Assert(rc == VINF_SUCCESS);
5756 break;
5757 }
5758 }
5759 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
5760 && VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
5761 && uExitVector != X86_XCPT_DF
5762 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5763 {
5764 /*
5765 * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
5766 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
5767 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
5768 */
5769 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5770 {
5771 Log4(("hmR0VmxCheckExitDueToEventDelivery: vcpu[%RU32] Setting VMCPU_FF_BLOCK_NMIS. Valid=%RTbool uExitReason=%u\n",
5772 pVCpu->idCpu, VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
5773 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5774 }
5775 }
5776
5777 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG);
5778 return rc;
5779}
5780
5781
5782/**
5783 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5784 *
5785 * @returns VBox status code.
5786 * @param pVCpu The cross context virtual CPU structure.
5787 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5788 * out-of-sync. Make sure to update the required fields
5789 * before using them.
5790 *
5791 * @remarks No-long-jump zone!!!
5792 */
5793static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5794{
5795 NOREF(pMixedCtx);
5796
5797 /*
5798 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
5799 * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
5800 */
5801 VMMRZCallRing3Disable(pVCpu);
5802 HM_DISABLE_PREEMPT();
5803
5804 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
5805 {
5806 uint32_t uVal = 0;
5807 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5808 AssertRCReturn(rc, rc);
5809
5810 uint32_t uShadow = 0;
5811 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5812 AssertRCReturn(rc, rc);
5813
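/* Reconstruct the guest-visible CR0: bits set in the CR0 guest/host mask are host-owned and guest
 reads of them return the read shadow, so take those bits from the shadow and the rest from the
 VMCS guest CR0 field. */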
5814 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
5815 CPUMSetGuestCR0(pVCpu, uVal);
5816 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
5817 }
5818
5819 HM_RESTORE_PREEMPT();
5820 VMMRZCallRing3Enable(pVCpu);
5821 return VINF_SUCCESS;
5822}
5823
5824
5825/**
5826 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5827 *
5828 * @returns VBox status code.
5829 * @param pVCpu The cross context virtual CPU structure.
5830 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5831 * out-of-sync. Make sure to update the required fields
5832 * before using them.
5833 *
5834 * @remarks No-long-jump zone!!!
5835 */
5836static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5837{
5838 NOREF(pMixedCtx);
5839
5840 int rc = VINF_SUCCESS;
5841 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
5842 {
5843 uint32_t uVal = 0;
5844 uint32_t uShadow = 0;
5845 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5846 AssertRCReturn(rc, rc);
5847 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5848 AssertRCReturn(rc, rc);
5849
5850 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5851 CPUMSetGuestCR4(pVCpu, uVal);
5852 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
5853 }
5854 return rc;
5855}
5856
5857
5858/**
5859 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5860 *
5861 * @returns VBox status code.
5862 * @param pVCpu The cross context virtual CPU structure.
5863 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5864 * out-of-sync. Make sure to update the required fields
5865 * before using them.
5866 *
5867 * @remarks No-long-jump zone!!!
5868 */
5869static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5870{
5871 int rc = VINF_SUCCESS;
5872 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
5873 {
5874 uint64_t u64Val = 0;
5875 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
5876 AssertRCReturn(rc, rc);
5877
5878 pMixedCtx->rip = u64Val;
5879 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
5880 }
5881 return rc;
5882}
5883
5884
5885/**
5886 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
5887 *
5888 * @returns VBox status code.
5889 * @param pVCpu The cross context virtual CPU structure.
5890 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5891 * out-of-sync. Make sure to update the required fields
5892 * before using them.
5893 *
5894 * @remarks No-long-jump zone!!!
5895 */
5896static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5897{
5898 int rc = VINF_SUCCESS;
5899 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
5900 {
5901 uint64_t u64Val = 0;
5902 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
5903 AssertRCReturn(rc, rc);
5904
5905 pMixedCtx->rsp = u64Val;
5906 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
5907 }
5908 return rc;
5909}
5910
5911
5912/**
5913 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
5914 *
5915 * @returns VBox status code.
5916 * @param pVCpu The cross context virtual CPU structure.
5917 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5918 * out-of-sync. Make sure to update the required fields
5919 * before using them.
5920 *
5921 * @remarks No-long-jump zone!!!
5922 */
5923static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5924{
5925 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
5926 {
5927 uint32_t uVal = 0;
5928 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
5929 AssertRCReturn(rc, rc);
5930
5931 pMixedCtx->eflags.u32 = uVal;
5932 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
5933 {
5934 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5935 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
5936
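/* The real-on-v86 hack ran the guest with VM set and the IOPL adjusted; clear VM and restore the
 IOPL the guest expects to see. */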
5937 pMixedCtx->eflags.Bits.u1VM = 0;
5938 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
5939 }
5940
5941 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
5942 }
5943 return VINF_SUCCESS;
5944}
5945
5946
5947/**
5948 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
5949 * guest-CPU context.
5950 */
5951DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5952{
5953 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5954 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
5955 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5956 return rc;
5957}
5958
5959
5960/**
5961 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
5962 * from the guest-state area in the VMCS.
5963 *
5964 * @param pVCpu The cross context virtual CPU structure.
5965 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5966 * out-of-sync. Make sure to update the required fields
5967 * before using them.
5968 *
5969 * @remarks No-long-jump zone!!!
5970 */
5971static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5972{
5973 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
5974 {
5975 uint32_t uIntrState = 0;
5976 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
5977 AssertRC(rc);
5978
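/* Translate the VMCS interruptibility-state into VBox's own tracking: STI/MOV SS blocking becomes
 the interrupt inhibit at the current RIP (EMSetInhibitInterruptsPC), NMI blocking becomes
 VMCPU_FF_BLOCK_NMIS, and whichever no longer applies is cleared. */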
5979 if (!uIntrState)
5980 {
5981 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
5982 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5983
5984 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5985 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5986 }
5987 else
5988 {
5989 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
5990 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
5991 {
5992 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5993 AssertRC(rc);
5994 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
5995 AssertRC(rc);
5996
5997 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
5998 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
5999 }
6000 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6001 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6002
6003 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6004 {
6005 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6006 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6007 }
6008 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6009 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6010 }
6011
6012 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
6013 }
6014}
6015
6016
6017/**
6018 * Saves the guest's activity state.
6019 *
6020 * @returns VBox status code.
6021 * @param pVCpu The cross context virtual CPU structure.
6022 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6023 * out-of-sync. Make sure to update the required fields
6024 * before using them.
6025 *
6026 * @remarks No-long-jump zone!!!
6027 */
6028static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6029{
6030 NOREF(pMixedCtx);
6031 /* Nothing to do for now until we make use of the different guest-CPU activity states. Just update the flag. */
6032 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6033 return VINF_SUCCESS;
6034}
6035
6036
6037/**
6038 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
6039 * the current VMCS into the guest-CPU context.
6040 *
6041 * @returns VBox status code.
6042 * @param pVCpu The cross context virtual CPU structure.
6043 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6044 * out-of-sync. Make sure to update the required fields
6045 * before using them.
6046 *
6047 * @remarks No-long-jump zone!!!
6048 */
6049static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6050{
6051 int rc = VINF_SUCCESS;
6052 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6053 {
6054 uint32_t u32Val = 0;
6055 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
6056 pMixedCtx->SysEnter.cs = u32Val;
6057 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6058 }
6059
6060 uint64_t u64Val = 0;
6061 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6062 {
6063 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
6064 pMixedCtx->SysEnter.eip = u64Val;
6065 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6066 }
6067 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6068 {
6069 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
6070 pMixedCtx->SysEnter.esp = u64Val;
6071 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6072 }
6073 return rc;
6074}
6075
6076
6077/**
6078 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
6079 * the CPU back into the guest-CPU context.
6080 *
6081 * @returns VBox status code.
6082 * @param pVCpu The cross context virtual CPU structure.
6083 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6084 * out-of-sync. Make sure to update the required fields
6085 * before using them.
6086 *
6087 * @remarks No-long-jump zone!!!
6088 */
6089static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6090{
6091#if HC_ARCH_BITS == 64
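/* The lazily swapped guest MSRs are only loaded for 64-bit guests on 64-bit hosts; in all other
 configurations nothing was loaded, so the flag is simply marked as updated. */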
6092 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6093 {
6094 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
6095 VMMRZCallRing3Disable(pVCpu);
6096 HM_DISABLE_PREEMPT();
6097
6098 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
6099 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6100 {
6101 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6102 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6103 }
6104
6105 HM_RESTORE_PREEMPT();
6106 VMMRZCallRing3Enable(pVCpu);
6107 }
6108 else
6109 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6110#else
6111 NOREF(pMixedCtx);
6112 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6113#endif
6114
6115 return VINF_SUCCESS;
6116}
6117
6118
6119/**
6120 * Saves the auto load/store'd guest MSRs from the current VMCS into
6121 * the guest-CPU context.
6122 *
6123 * @returns VBox status code.
6124 * @param pVCpu The cross context virtual CPU structure.
6125 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6126 * out-of-sync. Make sure to update the required fields
6127 * before using them.
6128 *
6129 * @remarks No-long-jump zone!!!
6130 */
6131static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6132{
6133 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6134 return VINF_SUCCESS;
6135
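/* Walk the VM-exit MSR-store area, which the CPU filled in on VM-exit, and push each guest MSR
 value back into the guest-CPU context / CPUM. */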
6136 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6137 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6138 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
6139 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6140 {
6141 switch (pMsr->u32Msr)
6142 {
6143 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6144 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
6145 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
6146 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
6147 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6148 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
6149 break;
6150
6151 default:
6152 {
6153 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
6154 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6155 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6156 }
6157 }
6158 }
6159
6160 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6161 return VINF_SUCCESS;
6162}
6163
6164
6165/**
6166 * Saves the guest control registers from the current VMCS into the guest-CPU
6167 * context.
6168 *
6169 * @returns VBox status code.
6170 * @param pVCpu The cross context virtual CPU structure.
6171 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6172 * out-of-sync. Make sure to update the required fields
6173 * before using them.
6174 *
6175 * @remarks No-long-jump zone!!!
6176 */
6177static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6178{
6179 /* Guest CR0. Guest FPU. */
6180 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6181 AssertRCReturn(rc, rc);
6182
6183 /* Guest CR4. */
6184 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6185 AssertRCReturn(rc, rc);
6186
6187 /* Guest CR2 - always updated during the world-switch or in #PF. */
6188 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
6189 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6190 {
6191 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6192 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6193
6194 PVM pVM = pVCpu->CTX_SUFF(pVM);
6195 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6196 || ( pVM->hm.s.fNestedPaging
6197 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
6198 {
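/* Only with unrestricted guest execution, or nested paging with guest paging enabled, does the
 VMCS CR3 field hold the real guest CR3; otherwise it refers to the shadow paging structures
 managed by PGM and must not be copied back into the guest context. */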
6199 uint64_t u64Val = 0;
6200 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6201 if (pMixedCtx->cr3 != u64Val)
6202 {
6203 CPUMSetGuestCR3(pVCpu, u64Val);
6204 if (VMMRZCallRing3IsEnabled(pVCpu))
6205 {
6206 PGMUpdateCR3(pVCpu, u64Val);
6207 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6208 }
6209 else
6210 {
6211 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
6212 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6213 }
6214 }
6215
6216 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
6217 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
6218 {
6219 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
6220 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
6221 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
6222 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
6223
6224 if (VMMRZCallRing3IsEnabled(pVCpu))
6225 {
6226 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6227 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6228 }
6229 else
6230 {
6231 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
6232 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6233 }
6234 }
6235 }
6236
6237 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6238 }
6239
6240 /*
6241 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6242 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6243 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
6244 *
6245 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
6246 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6247 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6248 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
6249 *
6250 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6251 */
6252 if (VMMRZCallRing3IsEnabled(pVCpu))
6253 {
6254 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6255 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6256
6257 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6258 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6259
6260 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6261 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6262 }
6263
6264 return rc;
6265}
6266
6267
6268/**
6269 * Reads a guest segment register from the current VMCS into the guest-CPU
6270 * context.
6271 *
6272 * @returns VBox status code.
6273 * @param pVCpu The cross context virtual CPU structure.
6274 * @param idxSel Index of the selector in the VMCS.
6275 * @param idxLimit Index of the segment limit in the VMCS.
6276 * @param idxBase Index of the segment base in the VMCS.
6277 * @param idxAccess Index of the access rights of the segment in the VMCS.
6278 * @param pSelReg Pointer to the segment selector.
6279 *
6280 * @remarks No-long-jump zone!!!
6281 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
6282 * macro as that takes care of whether to read from the VMCS cache or
6283 * not.
6284 */
6285DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6286 PCPUMSELREG pSelReg)
6287{
6288 NOREF(pVCpu);
6289
6290 uint32_t u32Val = 0;
6291 int rc = VMXReadVmcs32(idxSel, &u32Val);
6292 AssertRCReturn(rc, rc);
6293 pSelReg->Sel = (uint16_t)u32Val;
6294 pSelReg->ValidSel = (uint16_t)u32Val;
6295 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6296
6297 rc = VMXReadVmcs32(idxLimit, &u32Val);
6298 AssertRCReturn(rc, rc);
6299 pSelReg->u32Limit = u32Val;
6300
6301 uint64_t u64Val = 0;
6302 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
6303 AssertRCReturn(rc, rc);
6304 pSelReg->u64Base = u64Val;
6305
6306 rc = VMXReadVmcs32(idxAccess, &u32Val);
6307 AssertRCReturn(rc, rc);
6308 pSelReg->Attr.u = u32Val;
6309
6310 /*
6311 * If VT-x marks the segment as unusable, most other bits remain undefined:
6312 * - For CS the L, D and G bits have meaning.
6313 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6314 * - For the remaining data segments no bits are defined.
6315 *
6316 * The present bit and the unusable bit have been observed to be set at the
6317 * same time (the selector was supposed to be invalid as we started executing
6318 * a V8086 interrupt in ring-0).
6319 *
6320 * What should be important for the rest of the VBox code is that the P bit is
6321 * cleared. Some of the other VBox code recognizes the unusable bit, but
6322 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6323 * safe side here, we'll strip off P and other bits we don't care about. If
6324 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6325 *
6326 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6327 */
6328 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6329 {
6330 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
6331
6332 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6333 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6334 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6335
6336 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
6337#ifdef DEBUG_bird
6338 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
6339 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6340 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6341#endif
6342 }
6343 return VINF_SUCCESS;
6344}
6345
6346
6347#ifdef VMX_USE_CACHED_VMCS_ACCESSES
6348# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6349 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6350 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6351#else
6352# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6353 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6354 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6355#endif
6356
6357
6358/**
6359 * Saves the guest segment registers from the current VMCS into the guest-CPU
6360 * context.
6361 *
6362 * @returns VBox status code.
6363 * @param pVCpu The cross context virtual CPU structure.
6364 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe
6365 * out-of-sync. Make sure to update the required fields
6366 * before using them.
6367 *
6368 * @remarks No-long-jump zone!!!
6369 */
6370static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6371{
6372 /* Guest segment registers. */
6373 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6374 {
6375 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
6376 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
6377 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
6378 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
6379 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
6380 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
6381 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
6382
6383 /* Restore segment attributes for real-on-v86 mode hack. */
6384 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6385 {
6386 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6387 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6388 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6389 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6390 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6391 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6392 }
6393 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6394 }
6395
6396 return VINF_SUCCESS;
6397}
6398
6399
6400/**
6401 * Saves the guest descriptor table registers and task register from the current
6402 * VMCS into the guest-CPU context.
6403 *
6404 * @returns VBox status code.
6405 * @param pVCpu The cross context virtual CPU structure.
6406 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6407 * out-of-sync. Make sure to update the required fields
6408 * before using them.
6409 *
6410 * @remarks No-long-jump zone!!!
6411 */
6412static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6413{
6414 int rc = VINF_SUCCESS;
6415
6416 /* Guest LDTR. */
6417 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6418 {
6419 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
6420 AssertRCReturn(rc, rc);
6421 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6422 }
6423
6424 /* Guest GDTR. */
6425 uint64_t u64Val = 0;
6426 uint32_t u32Val = 0;
6427 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6428 {
6429 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6430 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6431 pMixedCtx->gdtr.pGdt = u64Val;
6432 pMixedCtx->gdtr.cbGdt = u32Val;
6433 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6434 }
6435
6436 /* Guest IDTR. */
6437 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6438 {
6439 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6440 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6441 pMixedCtx->idtr.pIdt = u64Val;
6442 pMixedCtx->idtr.cbIdt = u32Val;
6443 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6444 }
6445
6446 /* Guest TR. */
6447 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6448 {
6449 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6450 AssertRCReturn(rc, rc);
6451
6452 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
6453 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6454 {
6455 rc = VMXLOCAL_READ_SEG(TR, tr);
6456 AssertRCReturn(rc, rc);
6457 }
6458 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6459 }
6460 return rc;
6461}
6462
6463#undef VMXLOCAL_READ_SEG
6464
6465
6466/**
6467 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
6468 * context.
6469 *
6470 * @returns VBox status code.
6471 * @param pVCpu The cross context virtual CPU structure.
6472 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6473 * out-of-sync. Make sure to update the required fields
6474 * before using them.
6475 *
6476 * @remarks No-long-jump zone!!!
6477 */
6478static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6479{
6480 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6481 {
6482 if (!pVCpu->hm.s.fUsingHyperDR7)
6483 {
6484 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6485 uint32_t u32Val;
6486 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
6487 pMixedCtx->dr[7] = u32Val;
6488 }
6489
6490 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6491 }
6492 return VINF_SUCCESS;
6493}
6494
6495
6496/**
6497 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
6498 *
6499 * @returns VBox status code.
6500 * @param pVCpu The cross context virtual CPU structure.
6501 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6502 * out-of-sync. Make sure to update the required fields
6503 * before using them.
6504 *
6505 * @remarks No-long-jump zone!!!
6506 */
6507static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6508{
6509 NOREF(pMixedCtx);
6510
6511 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
6512 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6513 return VINF_SUCCESS;
6514}
6515
6516
6517/**
6518 * Saves the entire guest state from the currently active VMCS into the
6519 * guest-CPU context.
6520 *
6521 * This essentially VMREADs all guest-data.
6522 *
6523 * @returns VBox status code.
6524 * @param pVCpu The cross context virtual CPU structure.
6525 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6526 * out-of-sync. Make sure to update the required fields
6527 * before using them.
6528 */
6529static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6530{
6531 Assert(pVCpu);
6532 Assert(pMixedCtx);
6533
6534 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6535 return VINF_SUCCESS;
6536
6537 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
6538 again on the ring-3 callback path, there is no real need to. */
6539 if (VMMRZCallRing3IsEnabled(pVCpu))
6540 VMMR0LogFlushDisable(pVCpu);
6541 else
6542 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6543 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6544
6545 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6546 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6547
6548 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6549 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6550
6551 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6552 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6553
6554 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6555 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6556
6557 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6558 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6559
6560 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6561 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6562
6563 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6564 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6565
6566 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6567 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6568
6569 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6570 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6571
6572 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6573 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6574
6575 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6576 ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
6577
6578 if (VMMRZCallRing3IsEnabled(pVCpu))
6579 VMMR0LogFlushEnable(pVCpu);
6580
6581 return VINF_SUCCESS;
6582}
6583
6584
6585/**
6586 * Saves basic guest registers needed for IEM instruction execution.
6587 *
6588 * @returns VBox status code (OR-able).
6589 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
6590 * @param pMixedCtx Pointer to the CPU context of the guest.
6591 * @param fMemory Whether the instruction being executed operates on
6592 * memory or not. Only CR0 is synced up if clear.
6593 * @param fNeedRsp Need RSP (any instruction working on GPRs or stack).
6594 */
6595static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
6596{
6597 /*
6598 * We assume all general purpose registers other than RSP are available.
6599 *
6600 * RIP is a must, as it will be incremented or otherwise changed.
6601 *
6602 * RFLAGS are always required to figure the CPL.
6603 *
6604 * RSP isn't always required; however, it's a GPR, so it's frequently required.
6605 *
6606 * SS and CS are the only segment register needed if IEM doesn't do memory
6607 * access (CPL + 16/32/64-bit mode), but we can only get all segment registers.
6608 *
6609 * CR0 is always required by IEM for the CPL, while CR3 and CR4 will only
6610 * be required for memory accesses.
6611 *
6612 * Note! Before IEM dispatches an exception, it will call us to sync in everything.
6613 */
6614 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6615 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6616 if (fNeedRsp)
6617 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6618 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6619 if (!fMemory)
6620 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6621 else
6622 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6623 AssertRCReturn(rc, rc);
6624 return rc;
6625}
6626
6627
6628/**
6629 * Ensures that we've got a complete basic guest-context.
6630 *
6631 * This excludes the FPU, SSE, AVX, and similar extended state. The interface
6632 * is for the interpreter.
6633 *
6634 * @returns VBox status code.
6635 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
6636 * @param pMixedCtx Pointer to the guest-CPU context which may have data
6637 * needing to be synced in.
6638 * @thread EMT(pVCpu)
6639 */
6640VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6641{
6642 /* Note! Since this is only applicable to VT-x, the implementation is placed
6643 in the VT-x part of the sources instead of the generic stuff. */
6644 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
6645 return hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6646 return VINF_SUCCESS;
6647}
6648
6649
6650/**
6651 * Check per-VM and per-VCPU force flag actions that require us to go back to
6652 * ring-3 for one reason or another.
6653 *
6654 * @returns VBox status code (information status code included).
6655 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6656 * ring-3.
6657 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6658 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6659 * interrupts)
6660 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6661 * all EMTs to be in ring-3.
6662 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6663 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
6664 * to the EM loop.
6665 *
6666 * @param pVM The cross context VM structure.
6667 * @param pVCpu The cross context virtual CPU structure.
6668 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6669 * out-of-sync. Make sure to update the required fields
6670 * before using them.
6671 */
6672static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6673{
6674 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6675
6676 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
6677 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
6678 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
6679 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6680 {
6681 /* We need the control registers now, make sure the guest-CPU context is updated. */
6682 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6683 AssertRCReturn(rc3, rc3);
6684
6685 /* Pending HM CR3 sync. */
6686 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6687 {
6688 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6689 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
6690 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
6691 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6692 }
6693
6694 /* Pending HM PAE PDPEs. */
6695 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6696 {
6697 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6698 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6699 }
6700
6701 /* Pending PGM CR3 sync. */
6702 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6703 {
6704 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6705 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6706 if (rc2 != VINF_SUCCESS)
6707 {
6708 AssertRC(rc2);
6709 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
6710 return rc2;
6711 }
6712 }
6713
6714 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6715 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6716 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6717 {
6718 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6719 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6720 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6721 return rc2;
6722 }
6723
6724 /* Pending VM request packets, such as hardware interrupts. */
6725 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6726 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6727 {
6728 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
6729 return VINF_EM_PENDING_REQUEST;
6730 }
6731
6732 /* Pending PGM pool flushes. */
6733 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6734 {
6735 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
6736 return VINF_PGM_POOL_FLUSH_PENDING;
6737 }
6738
6739 /* Pending DMA requests. */
6740 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6741 {
6742 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
6743 return VINF_EM_RAW_TO_R3;
6744 }
6745 }
6746
6747 return VINF_SUCCESS;
6748}
6749
6750
6751/**
6752 * Converts any TRPM trap into a pending HM event. This is typically used when
6753 * entering from ring-3 (not longjmp returns).
6754 *
6755 * @param pVCpu The cross context virtual CPU structure.
6756 */
6757static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6758{
6759 Assert(TRPMHasTrap(pVCpu));
6760 Assert(!pVCpu->hm.s.Event.fPending);
6761
6762 uint8_t uVector;
6763 TRPMEVENT enmTrpmEvent;
6764 RTGCUINT uErrCode;
6765 RTGCUINTPTR GCPtrFaultAddress;
6766 uint8_t cbInstr;
6767
6768 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6769 AssertRC(rc);
6770
6771 /* See Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6772 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6773 if (enmTrpmEvent == TRPM_TRAP)
6774 {
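/* Pick the VT-x event type: NMIs are injected as NMIs, #BP/#OF as software exceptions (they were
 raised by INT3/INTO), exceptions that push an error code get the error-code-valid bit, and the
 rest are plain hardware exceptions. */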
6775 switch (uVector)
6776 {
6777 case X86_XCPT_NMI:
6778 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6779 break;
6780
6781 case X86_XCPT_BP:
6782 case X86_XCPT_OF:
6783 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6784 break;
6785
6786 case X86_XCPT_PF:
6787 case X86_XCPT_DF:
6788 case X86_XCPT_TS:
6789 case X86_XCPT_NP:
6790 case X86_XCPT_SS:
6791 case X86_XCPT_GP:
6792 case X86_XCPT_AC:
6793 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6794 /* no break! */
6795 default:
6796 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6797 break;
6798 }
6799 }
6800 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6801 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6802 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6803 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6804 else
6805 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6806
6807 rc = TRPMResetTrap(pVCpu);
6808 AssertRC(rc);
6809 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6810 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6811
6812 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6813 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
6814}
6815
6816
6817/**
6818 * Converts the pending HM event into a TRPM trap.
6819 *
6820 * @param pVCpu The cross context virtual CPU structure.
6821 */
6822static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6823{
6824 Assert(pVCpu->hm.s.Event.fPending);
6825
6826 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
6827 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
6828 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
6829 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6830
6831 /* If a trap was already pending, we did something wrong! */
6832 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6833
6834 TRPMEVENT enmTrapType;
6835 switch (uVectorType)
6836 {
6837 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6838 enmTrapType = TRPM_HARDWARE_INT;
6839 break;
6840
6841 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6842 enmTrapType = TRPM_SOFTWARE_INT;
6843 break;
6844
6845 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6846 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6847 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6848 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6849 enmTrapType = TRPM_TRAP;
6850 break;
6851
6852 default:
6853 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6854 enmTrapType = TRPM_32BIT_HACK;
6855 break;
6856 }
6857
6858 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6859
6860 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6861 AssertRC(rc);
6862
6863 if (fErrorCodeValid)
6864 TRPMSetErrorCode(pVCpu, uErrorCode);
6865
6866 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6867 && uVector == X86_XCPT_PF)
6868 {
6869 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6870 }
6871 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6872 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6873 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6874 {
6875 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6876 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6877 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6878 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6879 }
6880
6881 /* Clear any pending events from the VMCS. */
6882 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
6883 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0); AssertRC(rc);
6884
6885 /* We're now done converting the pending event. */
6886 pVCpu->hm.s.Event.fPending = false;
6887}
6888
6889
6890/**
6891 * Does the necessary state syncing before returning to ring-3 for any reason
6892 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6893 *
6894 * @returns VBox status code.
6895 * @param pVM The cross context VM structure.
6896 * @param pVCpu The cross context virtual CPU structure.
6897 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6898 * be out-of-sync. Make sure to update the required
6899 * fields before using them.
6900 * @param fSaveGuestState Whether to save the guest state or not.
6901 *
6902 * @remarks No-long-jmp zone!!!
6903 */
6904static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
6905{
6906 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6907 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6908
6909 RTCPUID idCpu = RTMpCpuId();
6910 Log4Func(("HostCpuId=%u\n", idCpu));
6911
6912 /*
6913 * !!! IMPORTANT !!!
6914 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
6915 */
6916
6917 /* Save the guest state if necessary. */
6918 if ( fSaveGuestState
6919 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
6920 {
6921 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6922 AssertRCReturn(rc, rc);
6923 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
6924 }
6925
6926 /* Restore host FPU state if necessary and resync on next R0 reentry. */
6927 if (CPUMIsGuestFPUStateActive(pVCpu))
6928 {
6929 /* We shouldn't reload CR0 without saving it first. */
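/* HM_CHANGED_GUEST_CR0 is set below, so the next VM-entry reloads CR0 into the VMCS from the
 guest-CPU context; that copy therefore has to be current before we get there. */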
6930 if (!fSaveGuestState)
6931 {
6932 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6933 AssertRCReturn(rc, rc);
6934 }
6935 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
6936 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6937 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
6938 }
6939
6940 /* Restore host debug registers if necessary and resync on next R0 reentry. */
6941#ifdef VBOX_STRICT
6942 if (CPUMIsHyperDebugStateActive(pVCpu))
6943 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6944#endif
6945 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
6946 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
6947 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
6948 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
6949
6950#if HC_ARCH_BITS == 64
6951 /* Restore host-state bits that VT-x only restores partially. */
6952 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
6953 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
6954 {
6955 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
6956 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6957 }
6958 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6959#endif
6960
6961#if HC_ARCH_BITS == 64
6962 /* Restore the lazy host MSRs as we're leaving VT-x context. */
6963 if ( pVM->hm.s.fAllow64BitGuests
6964 && pVCpu->hm.s.vmx.fLazyMsrs)
6965 {
6966 /* We shouldn't reload the guest MSRs without saving them first. */
6967 if (!fSaveGuestState)
6968 {
6969 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6970 AssertRCReturn(rc, rc);
6971 }
6972 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
6973 hmR0VmxLazyRestoreHostMsrs(pVCpu);
6974 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
6975 }
6976#endif
6977
6978 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
6979 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
6980
6981 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
6982 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
6983 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
6984 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
6985 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
6986 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
6987 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
6988 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6989
6990 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
6991
6992 /** @todo This partially defeats the purpose of having preemption hooks.
6993 * The problem is, deregistering the hooks should be moved to a place that
6994 * lasts until the EMT is about to be destroyed, not every time we leave HM
6995 * context.
6996 */
6997 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
6998 {
6999 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7000 AssertRCReturn(rc, rc);
7001
7002 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7003 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7004 }
7005 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7006 NOREF(idCpu);
7007
7008 return VINF_SUCCESS;
7009}
7010
7011
7012/**
7013 * Leaves the VT-x session.
7014 *
7015 * @returns VBox status code.
7016 * @param pVM The cross context VM structure.
7017 * @param pVCpu The cross context virtual CPU structure.
7018 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7019 * out-of-sync. Make sure to update the required fields
7020 * before using them.
7021 *
7022 * @remarks No-long-jmp zone!!!
7023 */
7024DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7025{
7026 HM_DISABLE_PREEMPT();
7027 HMVMX_ASSERT_CPU_SAFE();
7028 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7029 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7030
7031 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
7032 and done this from the VMXR0ThreadCtxCallback(). */
7033 if (!pVCpu->hm.s.fLeaveDone)
7034 {
7035 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7036 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
7037 pVCpu->hm.s.fLeaveDone = true;
7038 }
7039 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7040
7041 /*
7042 * !!! IMPORTANT !!!
7043 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7044 */
7045
7046 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7047 /** @todo Deregistering here means we need to VMCLEAR always
7048 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
7049 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7050 VMMR0ThreadCtxHookDisable(pVCpu);
7051
7052 /* Leave HM context. This takes care of local init (term). */
7053 int rc = HMR0LeaveCpu(pVCpu);
7054
7055 HM_RESTORE_PREEMPT();
7056 return rc;
7057}
7058
7059
7060/**
7061 * Does the necessary state syncing before doing a longjmp to ring-3.
7062 *
7063 * @returns VBox status code.
7064 * @param pVM The cross context VM structure.
7065 * @param pVCpu The cross context virtual CPU structure.
7066 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7067 * out-of-sync. Make sure to update the required fields
7068 * before using them.
7069 *
7070 * @remarks No-long-jmp zone!!!
7071 */
7072DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7073{
7074 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7075}
7076
7077
7078/**
7079 * Take necessary actions before going back to ring-3.
7080 *
7081 * An action requires us to go back to ring-3. This function does the necessary
7082 * steps before we can safely return to ring-3. This is not the same as longjmps
7083 * to ring-3; it is voluntary and prepares the guest so it may continue
7084 * executing outside HM (recompiler/IEM).
7085 *
7086 * @returns VBox status code.
7087 * @param pVM The cross context VM structure.
7088 * @param pVCpu The cross context virtual CPU structure.
7089 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7090 * out-of-sync. Make sure to update the required fields
7091 * before using them.
7092 * @param rcExit The reason for exiting to ring-3. Can be
7093 * VINF_VMM_UNKNOWN_RING3_CALL.
7094 */
7095static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
7096{
7097 Assert(pVM);
7098 Assert(pVCpu);
7099 Assert(pMixedCtx);
7100 HMVMX_ASSERT_PREEMPT_SAFE();
7101
7102 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7103 {
7104 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7105 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7106 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7107 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7108 }
7109
7110 /* Please, no longjumps here (a log flush must not jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7111 VMMRZCallRing3Disable(pVCpu);
7112 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
7113
7114 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7115 if (pVCpu->hm.s.Event.fPending)
7116 {
7117 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7118 Assert(!pVCpu->hm.s.Event.fPending);
7119 }
7120
7121 /* Clear interrupt-window and NMI-window controls as we re-evaluate it when we return from ring-3. */
7122 hmR0VmxClearIntNmiWindowsVmcs(pVCpu);
7123
7124 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
7125 and if we're injecting an event we should have a TRPM trap pending. */
7126 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", rcExit));
7127#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
7128 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", rcExit));
7129#endif
7130
7131 /* Save guest state and restore host state bits. */
7132 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7133 AssertRCReturn(rc, rc);
7134 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7135 /* Thread-context hooks are unregistered at this point!!! */
7136
7137 /* Sync recompiler state. */
7138 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7139 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7140 | CPUM_CHANGED_LDTR
7141 | CPUM_CHANGED_GDTR
7142 | CPUM_CHANGED_IDTR
7143 | CPUM_CHANGED_TR
7144 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7145 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7146 if ( pVM->hm.s.fNestedPaging
7147 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7148 {
7149 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7150 }
7151
7152 Assert(!pVCpu->hm.s.fClearTrapFlag);
7153
7154 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7155 if (rcExit != VINF_EM_RAW_INTERRUPT)
7156 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7157
7158 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7159
7160 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7161 VMMRZCallRing3RemoveNotification(pVCpu);
7162 VMMRZCallRing3Enable(pVCpu);
7163
7164 return rc;
7165}
7166
7167
7168/**
7169 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7170 * longjump to ring-3 and possibly get preempted.
7171 *
7172 * @returns VBox status code.
7173 * @param pVCpu The cross context virtual CPU structure.
7174 * @param enmOperation The operation causing the ring-3 longjump.
7175 * @param pvUser Opaque pointer to the guest-CPU context. The data
7176 * may be out-of-sync. Make sure to update the required
7177 * fields before using them.
7178 */
7179static DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7180{
7181 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7182 {
7183 /*
7184 * !!! IMPORTANT !!!
7185 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
7186 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
7187 */
7188 VMMRZCallRing3RemoveNotification(pVCpu);
7189 VMMRZCallRing3Disable(pVCpu);
7190 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
7191 RTThreadPreemptDisable(&PreemptState);
7192
7193 PVM pVM = pVCpu->CTX_SUFF(pVM);
7194 if (CPUMIsGuestFPUStateActive(pVCpu))
7195 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7196
7197 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7198
7199#if HC_ARCH_BITS == 64
7200 /* Restore host-state bits that VT-x only restores partially. */
7201 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7202 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7203 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7204 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7205
7206 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7207 if ( pVM->hm.s.fAllow64BitGuests
7208 && pVCpu->hm.s.vmx.fLazyMsrs)
7209 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7210#endif
7211 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7212 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7213 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7214 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7215 {
7216 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7217 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7218 }
7219
7220 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7221 VMMR0ThreadCtxHookDisable(pVCpu);
7222 HMR0LeaveCpu(pVCpu);
7223 RTThreadPreemptRestore(&PreemptState);
7224 return VINF_SUCCESS;
7225 }
7226
7227 Assert(pVCpu);
7228 Assert(pvUser);
7229 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7230 HMVMX_ASSERT_PREEMPT_SAFE();
7231
7232 VMMRZCallRing3Disable(pVCpu);
7233 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7234
7235 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
7236 enmOperation));
7237
7238 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7239 AssertRCReturn(rc, rc);
7240
7241 VMMRZCallRing3Enable(pVCpu);
7242 return VINF_SUCCESS;
7243}
7244
7245
7246/**
7247 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7248 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7249 *
7250 * @param pVCpu The cross context virtual CPU structure.
7251 */
7252DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7253{
7254 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7255 {
7256 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7257 {
7258 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7259 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7260 AssertRC(rc);
7261 Log4(("Setup interrupt-window exiting\n"));
7262 }
7263 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7264}
7265
7266
7267/**
7268 * Clears the interrupt-window exiting control in the VMCS.
7269 *
7270 * @param pVCpu The cross context virtual CPU structure.
7271 */
7272DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7273{
7274 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7275 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7276 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7277 AssertRC(rc);
7278 Log4(("Cleared interrupt-window exiting\n"));
7279}
7280
7281
7282/**
7283 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7284 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7285 *
7286 * @param pVCpu The cross context virtual CPU structure.
7287 */
7288DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7289{
7290 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7291 {
7292 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7293 {
7294 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7295 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7296 AssertRC(rc);
7297 Log4(("Setup NMI-window exiting\n"));
7298 }
7299 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7300}
7301
7302
7303/**
7304 * Clears the NMI-window exiting control in the VMCS.
7305 *
7306 * @param pVCpu The cross context virtual CPU structure.
7307 */
7308DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7309{
7310 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7311 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7312 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7313 AssertRC(rc);
7314 Log4(("Cleared NMI-window exiting\n"));
7315}
7316
7317
7318/**
7319 * Evaluates the event to be delivered to the guest and sets it as the pending
7320 * event.
7321 *
7322 * @param pVCpu The cross context virtual CPU structure.
7323 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7324 * out-of-sync. Make sure to update the required fields
7325 * before using them.
7326 */
7327static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7328{
7329 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7330 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7331 bool const fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7332 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7333 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
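     /* Block-by-STI and block-by-MOV SS inhibit interrupt delivery for the instruction following STI or a
        MOV SS/POP SS, while block-by-NMI is in effect from NMI delivery until the next IRET. */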
7334
7335 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7336 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7337 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7338 Assert(!TRPMHasTrap(pVCpu));
7339
7340 /*
7341 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7342 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7343 */
7344 /** @todo SMI. SMIs take priority over NMIs. */
7345 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7346 {
7347 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7348 if ( !pVCpu->hm.s.Event.fPending
7349 && !fBlockNmi
7350 && !fBlockSti
7351 && !fBlockMovSS)
7352 {
7353 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
7354 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7355 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7356
7357 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7358 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7359 }
7360 else
7361 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7362 }
7363 /*
7364 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
7365 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt.
7366 */
7367 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7368 && !pVCpu->hm.s.fSingleInstruction)
7369 {
7370 Assert(!DBGFIsStepping(pVCpu));
7371 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7372 AssertRC(rc);
7373 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7374 if ( !pVCpu->hm.s.Event.fPending
7375 && !fBlockInt
7376 && !fBlockSti
7377 && !fBlockMovSS)
7378 {
7379 uint8_t u8Interrupt;
7380 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7381 if (RT_SUCCESS(rc))
7382 {
7383 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7384 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
7385 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7386
7387 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7388 }
7389 else
7390 {
7391 /** @todo Does this actually happen? If not turn it into an assertion. */
7392 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7393 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7394 }
7395 }
7396 else
7397 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7398 }
7399}
7400
7401
7402/**
7403 * Sets a pending-debug exception to be delivered to the guest if the guest is
7404 * single-stepping in the VMCS.
7405 *
7406 * @param pVCpu The cross context virtual CPU structure.
7407 */
7408DECLINLINE(void) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu)
7409{
7410 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)); NOREF(pVCpu);
7411 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7412 AssertRC(rc);
7413}
7414
7415
7416/**
7417 * Injects any pending events into the guest if the guest is in a state to
7418 * receive them.
7419 *
7420 * @returns VBox status code (informational status codes included).
7421 * @param pVCpu The cross context virtual CPU structure.
7422 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7423 * out-of-sync. Make sure to update the required fields
7424 * before using them.
7425 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should
7426 * return VINF_EM_DBG_STEPPED if the event was
7427 * dispatched directly.
7428 */
7429static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
7430{
7431 HMVMX_ASSERT_PREEMPT_SAFE();
7432 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7433
7434 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7435 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7436 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7437 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7438
7439 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7440 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7441 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7442 Assert(!TRPMHasTrap(pVCpu));
7443
7444 int rc = VINF_SUCCESS;
7445 if (pVCpu->hm.s.Event.fPending)
7446 {
7447 /*
7448 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
7449 * pending even while injecting an event and in this case, we want a VM-exit as soon as
7450 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
7451 *
7452 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
7453 */
7454 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7455#ifdef VBOX_STRICT
7456 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7457 {
7458 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7459 Assert(!fBlockInt);
7460 Assert(!fBlockSti);
7461 Assert(!fBlockMovSS);
7462 }
7463 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7464 {
7465 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7466 Assert(!fBlockSti);
7467 Assert(!fBlockMovSS);
7468 Assert(!fBlockNmi);
7469 }
7470#endif
7471 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7472 (uint8_t)uIntType));
7473 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7474 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
7475 AssertRCReturn(rc, rc);
7476
7477 /* Update the interruptibility-state as it could have been changed by
7478 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7479 fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7480 fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7481
7482 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7483 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7484 else
7485 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7486 }
7487
7488 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7489 if ( fBlockSti
7490 || fBlockMovSS)
7491 {
7492 if (!pVCpu->hm.s.fSingleInstruction)
7493 {
7494 /*
7495 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7496 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7497 * See Intel spec. 27.3.4 "Saving Non-Register State".
7498 */
7499 Assert(!DBGFIsStepping(pVCpu));
7500 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7501 AssertRCReturn(rc2, rc2);
7502 if (pMixedCtx->eflags.Bits.u1TF)
7503 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
7504 }
7505 else if (pMixedCtx->eflags.Bits.u1TF)
7506 {
7507 /*
7508 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7509 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7510 */
7511 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7512 uIntrState = 0;
7513 }
7514 }
7515
7516 /*
7517 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7518 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7519 */
7520 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7521 AssertRC(rc2);
7522
7523 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
7524 NOREF(fBlockMovSS); NOREF(fBlockSti);
7525 return rc;
7526}
7527
7528
7529/**
7530 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
7531 *
7532 * @param pVCpu The cross context virtual CPU structure.
7533 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7534 * out-of-sync. Make sure to update the required fields
7535 * before using them.
7536 */
7537DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7538{
7539 NOREF(pMixedCtx);
7540 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7541 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7542}
7543
7544
7545/**
7546 * Injects a double-fault (\#DF) exception into the VM.
7547 *
7548 * @returns VBox status code (informational status code included).
7549 * @param pVCpu The cross context virtual CPU structure.
7550 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7551 * out-of-sync. Make sure to update the required fields
7552 * before using them.
7553 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7554 * and should return VINF_EM_DBG_STEPPED if the event
7555 * is injected directly (register modified by us, not
7556 * by hardware on VM-entry).
7557 * @param puIntrState Pointer to the current guest interruptibility-state.
7558 * This interruptibility-state will be updated if
7559 * necessary. This cannot be NULL.
7560 */
7561DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
7562{
7563 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7564 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7565 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7566 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7567 fStepping, puIntrState);
7568}
7569
7570
7571/**
7572 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
7573 *
7574 * @param pVCpu The cross context virtual CPU structure.
7575 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7576 * out-of-sync. Make sure to update the required fields
7577 * before using them.
7578 */
7579DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7580{
7581 NOREF(pMixedCtx);
7582 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7583 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7584 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7585}
7586
7587
7588/**
7589 * Sets an overflow (\#OF) exception as pending-for-injection into the VM.
7590 *
7591 * @param pVCpu The cross context virtual CPU structure.
7592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7593 * out-of-sync. Make sure to update the required fields
7594 * before using them.
7595 * @param cbInstr The instruction length in bytes; used to determine the value
7596 * of RIP that is pushed on the guest stack.
7597 */
7598DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7599{
7600 NOREF(pMixedCtx);
7601 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7602 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7603 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7604}
7605
7606
7607/**
7608 * Injects a general-protection (\#GP) fault into the VM.
7609 *
7610 * @returns VBox status code (informational status code included).
7611 * @param pVCpu The cross context virtual CPU structure.
7612 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7613 * out-of-sync. Make sure to update the required fields
7614 * before using them.
7615 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7616 * mode, i.e. in real-mode it's not valid).
7617 * @param u32ErrorCode The error code associated with the \#GP.
7618 * @param fStepping Whether we're running in
7619 * hmR0VmxRunGuestCodeStep() and should return
7620 * VINF_EM_DBG_STEPPED if the event is injected
7621 * directly (register modified by us, not by
7622 * hardware on VM-entry).
7623 * @param puIntrState Pointer to the current guest interruptibility-state.
7624 * This interruptibility-state will be updated if
7625 * necessary. This cannot be NULL.
7626 */
7627DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7628 bool fStepping, uint32_t *puIntrState)
7629{
7630 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7631 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7632 if (fErrorCodeValid)
7633 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7634 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7635 fStepping, puIntrState);
7636}
7637
7638
7639/**
7640 * Sets a general-protection (\#GP) exception as pending-for-injection into the
7641 * VM.
7642 *
7643 * @param pVCpu The cross context virtual CPU structure.
7644 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7645 * out-of-sync. Make sure to update the required fields
7646 * before using them.
7647 * @param u32ErrorCode The error code associated with the \#GP.
7648 */
7649DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7650{
7651 NOREF(pMixedCtx);
7652 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7653 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7654 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7655 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7656}
7657
7658
7659/**
7660 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7661 *
7662 * @param pVCpu The cross context virtual CPU structure.
7663 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7664 * out-of-sync. Make sure to update the required fields
7665 * before using them.
7666 * @param uVector The software interrupt vector number.
7667 * @param cbInstr The instruction length in bytes; used to determine the value
7668 * of RIP that is pushed on the guest stack.
7669 */
7670DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7671{
7672 NOREF(pMixedCtx);
7673 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
7674 if ( uVector == X86_XCPT_BP
7675 || uVector == X86_XCPT_OF)
7676 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7677 else
7678 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7679 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7680}
7681
7682
7683/**
7684 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7685 * stack.
7686 *
7687 * @returns VBox status code (informational status codes included).
7688 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7689 * @param pVM The cross context VM structure.
7690 * @param pMixedCtx Pointer to the guest-CPU context.
7691 * @param uValue The value to push to the guest stack.
7692 */
7693DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7694{
7695 /*
7696 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7697 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7698 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7699 */
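     /* With SP == 1 the 2-byte push would straddle the stack-segment wrap-around; we treat this as a
        shutdown/triple-fault condition (VINF_EM_RESET), in line with the Segment Wraparound reference above. */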
7700 if (pMixedCtx->sp == 1)
7701 return VINF_EM_RESET;
7702 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7703 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7704 AssertRCReturn(rc, rc);
7705 return rc;
7706}
7707
7708
7709/**
7710 * Injects an event into the guest upon VM-entry by updating the relevant fields
7711 * in the VM-entry area in the VMCS.
7712 *
7713 * @returns VBox status code (informational error codes included).
7714 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7715 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7716 *
7717 * @param pVCpu The cross context virtual CPU structure.
7718 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7719 * be out-of-sync. Make sure to update the required
7720 * fields before using them.
7721 * @param u64IntInfo The VM-entry interruption-information field.
7722 * @param cbInstr The VM-entry instruction length in bytes (for
7723 * software interrupts, exceptions and privileged
7724 * software exceptions).
7725 * @param u32ErrCode The VM-entry exception error code.
7726 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions.
7727 * @param puIntrState Pointer to the current guest interruptibility-state.
7728 * This interruptibility-state will be updated if
7729 * necessary. This cannot be NULL.
7730 * @param fStepping Whether we're running in
7731 * hmR0VmxRunGuestCodeStep() and should return
7732 * VINF_EM_DBG_STEPPED if the event is injected
7733 * directly (register modified by us, not by
7734 * hardware on VM-entry).
7735 *
7736 * @remarks Requires CR0!
7737 * @remarks No-long-jump zone!!!
7738 */
7739static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7740 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *puIntrState)
7741{
7742 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7743 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
7744 Assert(puIntrState);
7745 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7746
7747 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7748 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7749
7750#ifdef VBOX_STRICT
7751 /* Validate the error-code-valid bit for hardware exceptions. */
7752 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
7753 {
7754 switch (uVector)
7755 {
7756 case X86_XCPT_PF:
7757 case X86_XCPT_DF:
7758 case X86_XCPT_TS:
7759 case X86_XCPT_NP:
7760 case X86_XCPT_SS:
7761 case X86_XCPT_GP:
7762 case X86_XCPT_AC:
7763 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7764 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7765 /* fallthru */
7766 default:
7767 break;
7768 }
7769 }
7770#endif
7771
7772 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7773 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7774 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7775
7776 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7777
7778 /* We require CR0 to check if the guest is in real-mode. */
7779 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7780 AssertRCReturn(rc, rc);
7781
7782 /*
7783 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
7784 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
7785 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
7786 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7787 */
7788 if (CPUMIsGuestInRealModeEx(pMixedCtx))
7789 {
7790 PVM pVM = pVCpu->CTX_SUFF(pVM);
7791 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
7792 {
7793 Assert(PDMVmmDevHeapIsEnabled(pVM));
7794 Assert(pVM->hm.s.vmx.pRealModeTSS);
7795
7796 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
7797 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7798 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7799 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7800 AssertRCReturn(rc, rc);
7801 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
7802
7803 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7804 size_t const cbIdtEntry = sizeof(X86IDTR16);
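             /* Each IVT entry is 4 bytes (IP:CS), so vector N occupies bytes N*4 through N*4 + 3 and lies
                outside the IDT when N*4 + 3 exceeds the IDT limit. */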
7805 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7806 {
7807 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7808 if (uVector == X86_XCPT_DF)
7809 return VINF_EM_RESET;
7810
7811 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7812 if (uVector == X86_XCPT_GP)
7813 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
7814
7815 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
7816 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
7817 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrorCodeValid */, 0 /* u32ErrCode */,
7818 fStepping, puIntrState);
7819 }
7820
7821 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7822 uint16_t uGuestIp = pMixedCtx->ip;
7823 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
7824 {
7825 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7826 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
7827 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7828 }
7829 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
7830 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7831
7832 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7833 X86IDTR16 IdtEntry;
7834 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
7835 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7836 AssertRCReturn(rc, rc);
7837
7838 /* Construct the stack frame for the interrupt/exception handler. */
7839 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
7840 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
7841 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
7842 AssertRCReturn(rc, rc);
7843
7844 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7845 if (rc == VINF_SUCCESS)
7846 {
7847 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7848 pMixedCtx->rip = IdtEntry.offSel;
7849 pMixedCtx->cs.Sel = IdtEntry.uSel;
7850 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
7851 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
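                 /* Note: cbIdtEntry is 4, so this is uSel << 4, i.e. the real-mode segment base (selector * 16). */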
7852 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7853 && uVector == X86_XCPT_PF)
7854 pMixedCtx->cr2 = GCPtrFaultAddress;
7855
7856 /* If any other guest-state bits are changed here, make sure to update
7857 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7858 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
7859 | HM_CHANGED_GUEST_RIP
7860 | HM_CHANGED_GUEST_RFLAGS
7861 | HM_CHANGED_GUEST_RSP);
7862
7863 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7864 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7865 {
7866 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7867 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7868 Log4(("Clearing inhibition due to STI.\n"));
7869 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
7870 }
7871 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
7872 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
7873
7874 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
7875 it, if we are returning to ring-3 before executing guest code. */
7876 pVCpu->hm.s.Event.fPending = false;
7877
7878 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
7879 if (fStepping)
7880 rc = VINF_EM_DBG_STEPPED;
7881 }
7882 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
7883 return rc;
7884 }
7885
7886 /*
7887 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
7888 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7889 */
7890 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7891 }
7892
7893 /* Validate. */
7894 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
7895 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
7896 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
7897
7898 /* Inject. */
7899 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
7900 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
7901 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
7902 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
7903
7904 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7905 && uVector == X86_XCPT_PF)
7906 pMixedCtx->cr2 = GCPtrFaultAddress;
7907
7908 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
7909 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
7910
7911 AssertRCReturn(rc, rc);
7912 return rc;
7913}
7914
7915
7916/**
7917 * Clears the interrupt-window and NMI-window exiting controls in the VMCS if
7918 * they are currently set.
7919 *
7921 * @param pVCpu The cross context virtual CPU structure.
7922 *
7923 * @remarks Use this function only to clear events that have not yet been
7924 * delivered to the guest but are injected in the VMCS!
7925 * @remarks No-long-jump zone!!!
7926 */
7927static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
7928{
7929 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
7930
7931 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7932 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7933
7934 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
7935 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
7936}
7937
7938
7939/**
7940 * Enters the VT-x session.
7941 *
7942 * @returns VBox status code.
7943 * @param pVM The cross context VM structure.
7944 * @param pVCpu The cross context virtual CPU structure.
7945 * @param pCpu Pointer to the CPU info struct.
7946 */
7947VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
7948{
7949 AssertPtr(pVM);
7950 AssertPtr(pVCpu);
7951 Assert(pVM->hm.s.vmx.fSupported);
7952 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7953 NOREF(pCpu); NOREF(pVM);
7954
7955 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
7956 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
7957
7958#ifdef VBOX_STRICT
7959 /* Make sure we're in VMX root mode. */
7960 RTCCUINTREG u32HostCR4 = ASMGetCR4();
7961 if (!(u32HostCR4 & X86_CR4_VMXE))
7962 {
7963 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
7964 return VERR_VMX_X86_CR4_VMXE_CLEARED;
7965 }
7966#endif
7967
7968 /*
7969 * Load the VCPU's VMCS as the current (and active) one.
7970 */
7971 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
7972 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7973 if (RT_FAILURE(rc))
7974 return rc;
7975
7976 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
7977 pVCpu->hm.s.fLeaveDone = false;
7978 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
7979
7980 return VINF_SUCCESS;
7981}
7982
7983
7984/**
7985 * The thread-context callback (only on platforms which support it).
7986 *
7987 * @param enmEvent The thread-context event.
7988 * @param pVCpu The cross context virtual CPU structure.
7989 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
7990 * @thread EMT(pVCpu)
7991 */
7992VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
7993{
7994 NOREF(fGlobalInit);
7995
7996 switch (enmEvent)
7997 {
7998 case RTTHREADCTXEVENT_OUT:
7999 {
8000 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8001 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8002 VMCPU_ASSERT_EMT(pVCpu);
8003
8004 PVM pVM = pVCpu->CTX_SUFF(pVM);
8005 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
8006
8007 /* No longjmps (logger flushes, locks) in this fragile context. */
8008 VMMRZCallRing3Disable(pVCpu);
8009 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8010
8011 /*
8012 * Restore host-state (FPU, debug etc.)
8013 */
8014 if (!pVCpu->hm.s.fLeaveDone)
8015 {
8016 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are
8017 holding the PGM lock while saving the guest state (see hmR0VmxSaveGuestControlRegs())). */
8018 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
8019 pVCpu->hm.s.fLeaveDone = true;
8020 }
8021
8022 /* Leave HM context, takes care of local init (term). */
8023 int rc = HMR0LeaveCpu(pVCpu);
8024 AssertRC(rc); NOREF(rc);
8025
8026 /* Restore longjmp state. */
8027 VMMRZCallRing3Enable(pVCpu);
8028 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
8029 break;
8030 }
8031
8032 case RTTHREADCTXEVENT_IN:
8033 {
8034 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8035 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8036 VMCPU_ASSERT_EMT(pVCpu);
8037
8038 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8039 VMMRZCallRing3Disable(pVCpu);
8040 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8041
8042 /* Initialize the bare minimum state required for HM. This takes care of
8043 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8044 int rc = HMR0EnterCpu(pVCpu);
8045 AssertRC(rc);
8046 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8047
8048 /* Load the active VMCS as the current one. */
8049 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8050 {
8051 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8052 AssertRC(rc); NOREF(rc);
8053 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8054 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8055 }
8056 pVCpu->hm.s.fLeaveDone = false;
8057
8058 /* Restore longjmp state. */
8059 VMMRZCallRing3Enable(pVCpu);
8060 break;
8061 }
8062
8063 default:
8064 break;
8065 }
8066}
8067
8068
8069/**
8070 * Saves the host state in the VMCS host-state.
8071 * Sets up the VM-exit MSR-load area.
8072 *
8073 * The CPU state will be loaded from these fields on every successful VM-exit.
8074 *
8075 * @returns VBox status code.
8076 * @param pVM The cross context VM structure.
8077 * @param pVCpu The cross context virtual CPU structure.
8078 *
8079 * @remarks No-long-jump zone!!!
8080 */
8081static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8082{
8083 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8084
8085 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8086 return VINF_SUCCESS;
8087
8088 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8089 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8090
8091 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8092 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8093
8094 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8095 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8096
8097 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8098 return rc;
8099}
8100
8101
8102/**
8103 * Saves the host state in the VMCS host-state.
8104 *
8105 * @returns VBox status code.
8106 * @param pVM The cross context VM structure.
8107 * @param pVCpu The cross context virtual CPU structure.
8108 *
8109 * @remarks No-long-jump zone!!!
8110 */
8111VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8112{
8113 AssertPtr(pVM);
8114 AssertPtr(pVCpu);
8115
8116 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8117
8118 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
8119 and have to re-save the host state, but most of the time we won't be, so do it here before we disable interrupts. */
8120 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8121 return hmR0VmxSaveHostState(pVM, pVCpu);
8122}
8123
8124
8125/**
8126 * Loads the guest state into the VMCS guest-state area.
8127 *
8128 * This will typically be done before VM-entry when the guest-CPU state and the
8129 * VMCS state may potentially be out of sync.
8130 *
8131 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
8132 * VM-entry controls.
8133 * Sets up the appropriate VMX non-root function to execute guest code based on
8134 * the guest CPU mode.
8135 *
8136 * @returns VBox status code.
8137 * @param pVM The cross context VM structure.
8138 * @param pVCpu The cross context virtual CPU structure.
8139 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8140 * out-of-sync. Make sure to update the required fields
8141 * before using them.
8142 *
8143 * @remarks No-long-jump zone!!!
8144 */
8145static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8146{
8147 AssertPtr(pVM);
8148 AssertPtr(pVCpu);
8149 AssertPtr(pMixedCtx);
8150 HMVMX_ASSERT_PREEMPT_SAFE();
8151
8152 VMMRZCallRing3Disable(pVCpu);
8153 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8154
8155 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8156
8157 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8158
8159 /* Determine real-on-v86 mode. */
8160 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8161 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
8162 && CPUMIsGuestInRealModeEx(pMixedCtx))
8163 {
8164 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8165 }
8166
8167 /*
8168 * Load the guest-state into the VMCS.
8169 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8170 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8171 */
8172 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8173 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8174
8175 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8176 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8177 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8178
8179 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8180 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8181 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8182
8183 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8184 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8185
8186 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8187 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8188
8189 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
8190 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8191 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8192
8193 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
8194 determine we don't have to swap EFER after all. */
8195 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8196 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8197
8198 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8199 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8200
8201 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
8202 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8203
8204 /*
8205 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
8206 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
8207 */
8208 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8209 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8210
8211 /* Clear any unused and reserved bits. */
8212 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8213
8214 VMMRZCallRing3Enable(pVCpu);
8215
8216 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8217 return rc;
8218}
8219
8220
8221/**
8222 * Loads the state shared between the host and guest into the VMCS.
8223 *
8224 * @param pVM The cross context VM structure.
8225 * @param pVCpu The cross context virtual CPU structure.
8226 * @param pCtx Pointer to the guest-CPU context.
8227 *
8228 * @remarks No-long-jump zone!!!
8229 */
8230static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8231{
8232 NOREF(pVM);
8233
8234 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8235 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8236
8237 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8238 {
8239 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8240 AssertRC(rc);
8241 }
8242
8243 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8244 {
8245 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8246 AssertRC(rc);
8247
8248 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8249 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8250 {
8251 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8252 AssertRC(rc);
8253 }
8254 }
8255
8256 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8257 {
8258#if HC_ARCH_BITS == 64
8259 if (pVM->hm.s.fAllow64BitGuests)
8260 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8261#endif
8262 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8263 }
8264
8265 /* Loading CR0, debug state might have changed intercepts, update VMCS. */
8266 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
8267 {
8268 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
8269 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
8270 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8271 AssertRC(rc);
8272 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
8273 }
8274
8275 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8276 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8277}
8278
8279
8280/**
8281 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8282 *
8283 * @param pVM The cross context VM structure.
8284 * @param pVCpu The cross context virtual CPU structure.
8285 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8286 * out-of-sync. Make sure to update the required fields
8287 * before using them.
8288 */
8289DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8290{
8291 HMVMX_ASSERT_PREEMPT_SAFE();
8292
8293 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8294#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8295 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8296#endif
8297
8298 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8299 {
8300 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8301 AssertRC(rc);
8302 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8303 }
8304 else if (HMCPU_CF_VALUE(pVCpu))
8305 {
8306 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8307 AssertRC(rc);
8308 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8309 }
8310
8311 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8312 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8313 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8314 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8315}
8316
8317
8318/**
8319 * Does the preparations before executing guest code in VT-x.
8320 *
8321 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8322 * recompiler/IEM. We must be cautious about what we do here when committing
8323 * guest-state information into the VMCS, as we cannot assume that we will
8324 * assuredly execute the guest in VT-x mode.
8325 *
8326 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8327 * the common-state (TRPM/forceflags), we must undo those changes so that the
8328 * recompiler/IEM can (and should) use them when it resumes guest execution.
8329 * Otherwise such operations must be done when we can no longer exit to ring-3.
8330 *
8331 * @returns Strict VBox status code.
8332 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8333 * have been disabled.
8334 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8335 * double-fault into the guest.
8336 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8337 * dispatched directly.
8338 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8339 *
8340 * @param pVM The cross context VM structure.
8341 * @param pVCpu The cross context virtual CPU structure.
8342 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8343 * out-of-sync. Make sure to update the required fields
8344 * before using them.
8345 * @param pVmxTransient Pointer to the VMX transient structure.
8346 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8347 * us ignore some of the reasons for returning to
8348 * ring-3, and return VINF_EM_DBG_STEPPED if event
8349 * dispatching took place.
8350 */
8351static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8352{
8353 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8354
8355#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8356 PGMRZDynMapFlushAutoSet(pVCpu);
8357#endif
8358
8359 /* Check force flag actions that might require us to go back to ring-3. */
8360 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
8361 if (rc != VINF_SUCCESS)
8362 return rc;
8363
8364#ifndef IEM_VERIFICATION_MODE_FULL
8365 /* Setup the Virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
8366 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8367 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8368 {
8369 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8370 RTGCPHYS GCPhysApicBase;
8371 GCPhysApicBase = pMixedCtx->msrApicBase;
8372 GCPhysApicBase &= PAGE_BASE_GC_MASK;
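         /* The low bits of the APIC base MSR are flags; masking them off yields the page-aligned physical
            address of the APIC page. */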
8373
8374 /* Unalias any existing mapping. */
8375 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8376 AssertRCReturn(rc, rc);
8377
8378 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
8379 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
8380 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8381 AssertRCReturn(rc, rc);
8382
8383 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8384 }
8385#endif /* !IEM_VERIFICATION_MODE_FULL */
8386
8387 if (TRPMHasTrap(pVCpu))
8388 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8389 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8390
8391 /*
8392 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
8393 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
8394 */
8395 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
8396 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8397 {
8398 Assert(rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
8399 return rc;
8400 }
8401
8402 /*
8403 * Load the guest state bits, we can handle longjmps/getting preempted here.
8404 *
8405 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8406 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8407 * Hence, this needs to be done -after- injection of events.
8408 */
8409 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8410
8411 /*
8412 * No longjmps to ring-3 from this point on!!!
8413 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8414 * This also disables flushing of the R0-logger instance (if any).
8415 */
8416 VMMRZCallRing3Disable(pVCpu);
8417
8418 /*
8419 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
8420 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
8421 *
8422 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
8423 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
8424 *
8425 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
8426 * executing guest code.
8427 */
8428 pVmxTransient->fEFlags = ASMIntDisableFlags();
8429 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8430 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8431 && ( !fStepping /* Optimized for the non-stepping case, of course. */
8432 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8433 {
8434 ASMSetFlags(pVmxTransient->fEFlags);
8435 VMMRZCallRing3Enable(pVCpu);
8436 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8437 return VINF_EM_RAW_TO_R3;
8438 }
8439
8440 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
8441 {
8442 ASMSetFlags(pVmxTransient->fEFlags);
8443 VMMRZCallRing3Enable(pVCpu);
8444 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8445 return VINF_EM_RAW_INTERRUPT;
8446 }
8447
8448 /* We've injected any pending events. This is really the point of no return (to ring-3). */
8449 pVCpu->hm.s.Event.fPending = false;
8450
8451 return VINF_SUCCESS;
8452}
8453
8454
8455/**
8456 * Prepares to run guest code in VT-x and we've committed to doing so. This
8457 * means there is no backing out to ring-3 or anywhere else at this
8458 * point.
8459 *
8460 * @param pVM The cross context VM structure.
8461 * @param pVCpu The cross context virtual CPU structure.
8462 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8463 * out-of-sync. Make sure to update the required fields
8464 * before using them.
8465 * @param pVmxTransient Pointer to the VMX transient structure.
8466 *
8467 * @remarks Called with preemption disabled.
8468 * @remarks No-long-jump zone!!!
8469 */
8470static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8471{
8472 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8473 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8474 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8475
8476 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8477 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8478
8479#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8480 if (!CPUMIsGuestFPUStateActive(pVCpu))
8481 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8482 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8483#endif
8484
8485 if ( pVCpu->hm.s.fPreloadGuestFpu
8486 && !CPUMIsGuestFPUStateActive(pVCpu))
8487 {
8488 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8489 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8490 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8491 }
8492
8493 /*
8494 * Lazy-update of the host MSR values in the auto-load/store MSR area.
8495 */
8496 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8497 && pVCpu->hm.s.vmx.cMsrs > 0)
8498 {
8499 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8500 }
8501
8502 /*
8503 * Load the host state bits as we may've been preempted (only happens when
8504 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
8505 */
8506 /** @todo Why should hmR0VmxSetupVMRunHandler() changing pfnStartVM have
8507 * any effect on the host state needing to be saved? */
8508 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8509 {
8510 /* This ASSUMES that pfnStartVM has been set up already. */
8511 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8512 AssertRC(rc);
8513 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptSaveHostState);
8514 }
8515 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8516
8517 /*
8518 * Load the state shared between host and guest (FPU, debug, lazy MSRs).
8519 */
8520 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8521 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8522 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8523
8524 /* Store status of the shared guest-host state at the time of VM-entry. */
8525#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
8526 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8527 {
8528 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8529 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8530 }
8531 else
8532#endif
8533 {
8534 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8535 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8536 }
8537 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8538
8539 /*
8540 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8541 */
8542 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8543 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
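         /* Note: offset 0x80 into the virtual-APIC page is the TPR register. */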
8544
8545 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
8546 RTCPUID idCurrentCpu = pCpu->idCpu;
8547 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8548 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8549 {
8550 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
8551 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8552 }
8553
8554 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
8555 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8556 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8557 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8558
8559 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8560
8561 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8562 to start executing. */
8563
8564 /*
8565 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8566 */
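    /* RDTSCP reads IA32_TSC_AUX into ECX without exiting, so when RDTSC exiting is off the guest's
       value must be in the MSR (loaded via the auto-load/store area below). */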
8567 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8568 {
8569 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8570 {
8571 bool fMsrUpdated;
8572 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8573 AssertRC(rc2);
8574 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8575
8576 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
8577 &fMsrUpdated);
8578 AssertRC(rc2);
8579 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8580
8581 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8582 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8583 }
8584 else
8585 {
8586 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8587 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8588 }
8589 }
8590
8591#ifdef VBOX_STRICT
8592 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8593 hmR0VmxCheckHostEferMsr(pVCpu);
8594 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8595#endif
8596#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8597 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8598 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8599 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8600#endif
8601}
8602
8603
8604/**
8605 * Performs some essential restoration of state after running guest code in
8606 * VT-x.
8607 *
8608 * @param pVM The cross context VM structure.
8609 * @param pVCpu The cross context virtual CPU structure.
8610 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
8611 * out-of-sync. Make sure to update the required fields
8612 * before using them.
8613 * @param pVmxTransient Pointer to the VMX transient structure.
8614 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8615 *
8616 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8617 *
8618 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8619 * unconditionally when it is safe to do so.
8620 */
8621static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8622{
8623 NOREF(pVM);
8624
8625 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8626
8627 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
8628 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
8629 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8630 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8631 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8632 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8633
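    /* When RDTSC isn't intercepted the guest has been reading host-TSC + offset; record that as the
       last TSC value the guest could have seen. */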
8634 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8635 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);
8636
8637 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8638 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8639 Assert(!ASMIntAreEnabled());
8640 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8641
8642#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8643 if (CPUMIsGuestFPUStateActive(pVCpu))
8644 {
8645 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8646 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8647 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8648 }
8649#endif
8650
8651#if HC_ARCH_BITS == 64
8652 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8653#endif
8654 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8655#ifdef VBOX_STRICT
8656 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8657#endif
8658 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
8659 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8660
8661 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8662 uint32_t uExitReason;
8663 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8664 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8665 AssertRC(rc);
8666 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
8667 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
8668
8669 /* Update the VM-exit history array. */
8670 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmxTransient->uExitReason);
8671
8672 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
8673 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
8674 {
8675 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8676 pVmxTransient->fVMEntryFailed));
8677 return;
8678 }
8679
8680 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
8681 {
8682 /** @todo We can optimize this by only syncing with our force-flags when
8683 * really needed and keeping the VMCS state as it is for most
8684 * VM-exits. */
8685 /* Update the guest interruptibility-state from the VMCS. */
8686 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8687
8688#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8689 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8690 AssertRC(rc);
8691#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8692 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8693 AssertRC(rc);
8694#endif
8695
8696 /*
8697 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
8698     * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
8699     * why it's done here: it's easier and no less efficient to deal with it here than to make hmR0VmxSaveGuestState()
8700 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
8701 */
8702 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8703 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8704 {
8705 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8706 AssertRC(rc);
8707 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8708 }
8709 }
8710}
8711
8712
8713/**
8714 * Runs the guest code using VT-x the normal way.
8715 *
8716 * @returns VBox status code.
8717 * @param pVM The cross context VM structure.
8718 * @param pVCpu The cross context virtual CPU structure.
8719 * @param pCtx Pointer to the guest-CPU context.
8720 *
8721 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8722 */
8723static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8724{
8725 VMXTRANSIENT VmxTransient;
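    /* Force the first iteration to set up TSC offsetting and the VMX-preemption timer. */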
8726 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8727 int rc = VERR_INTERNAL_ERROR_5;
8728 uint32_t cLoops = 0;
8729
8730 for (;; cLoops++)
8731 {
8732 Assert(!HMR0SuspendPending());
8733 HMVMX_ASSERT_CPU_SAFE();
8734
8735        /* Preparatory work for running guest code; this may force us to return
8736 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8737 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8738 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8739 if (rc != VINF_SUCCESS)
8740 break;
8741
8742 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8743 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8744 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8745
8746 /* Restore any residual host-state and save any bits shared between host
8747 and guest into the guest-CPU state. Re-enables interrupts! */
8748 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8749
8750 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8751 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8752 {
8753 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8754 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8755 return rc;
8756 }
8757
8758 /* Profile the VM-exit. */
8759 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8760 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8761 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8762 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8763 HMVMX_START_EXIT_DISPATCH_PROF();
8764
8765 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8766 if (RT_UNLIKELY(VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()))
8767 {
8768 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
8769 hmR0VmxSaveGuestState(pVCpu, pCtx);
8770 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
8771 }
8772
8773 /* Handle the VM-exit. */
8774#ifdef HMVMX_USE_FUNCTION_TABLE
8775 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8776#else
8777 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8778#endif
8779 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8780 if (rc != VINF_SUCCESS)
8781 break;
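        /* Don't stay in ring-0 forever: after cMaxResumeLoops iterations return to ring-3 via VINF_EM_RAW_INTERRUPT. */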
8782 if (cLoops > pVM->hm.s.cMaxResumeLoops)
8783 {
8784 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
8785 rc = VINF_EM_RAW_INTERRUPT;
8786 break;
8787 }
8788 }
8789
8790 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8791 return rc;
8792}
8793
8794
8795/**
8796 * Single steps guest code using VT-x.
8797 *
8798 * @returns VBox status code.
8799 * @param pVM The cross context VM structure.
8800 * @param pVCpu The cross context virtual CPU structure.
8801 * @param pCtx Pointer to the guest-CPU context.
8802 *
8803 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
8804 */
8805static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8806{
8807 VMXTRANSIENT VmxTransient;
8808 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8809 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8810 uint32_t cLoops = 0;
8811 uint16_t uCsStart = pCtx->cs.Sel;
8812 uint64_t uRipStart = pCtx->rip;
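    /* Remember CS:RIP at the start so we can tell when the guest has actually stepped onto a new instruction. */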
8813
8814 for (;; cLoops++)
8815 {
8816 Assert(!HMR0SuspendPending());
8817 HMVMX_ASSERT_CPU_SAFE();
8818
8819        /* Preparatory work for running guest code; this may force us to return
8820 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8821 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8822 rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /* fStepping */);
8823 if (rcStrict != VINF_SUCCESS)
8824 break;
8825
8826 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8827 rcStrict = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8828 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8829
8830 /* Restore any residual host-state and save any bits shared between host
8831 and guest into the guest-CPU state. Re-enables interrupts! */
8832 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
8833
8834 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8835 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
8836 {
8837 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8838 hmR0VmxReportWorldSwitchError(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pCtx, &VmxTransient);
8839 return VBOXSTRICTRC_TODO(rcStrict);
8840 }
8841
8842 /* Profile the VM-exit. */
8843 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8844 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8845 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8846 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8847 HMVMX_START_EXIT_DISPATCH_PROF();
8848
8849 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8850 if (RT_UNLIKELY(VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()))
8851 {
8852 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
8853 hmR0VmxSaveGuestState(pVCpu, pCtx);
8854 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
8855 }
8856
8857 /* Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitStep(). */
8858 rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
8859 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8860 if (rcStrict != VINF_SUCCESS)
8861 break;
8862 if (cLoops > pVM->hm.s.cMaxResumeLoops)
8863 {
8864 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
8865 rcStrict = VINF_EM_RAW_INTERRUPT;
8866 break;
8867 }
8868
8869 /*
8870         * Did the RIP change? If so, consider it a single step.
8871 * Otherwise, make sure one of the TFs gets set.
8872 */
8873 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
8874 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
8875 AssertRCReturn(rc2, rc2);
8876 if ( pCtx->rip != uRipStart
8877 || pCtx->cs.Sel != uCsStart)
8878 {
8879 rcStrict = VINF_EM_DBG_STEPPED;
8880 break;
8881 }
8882 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
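        /* RIP/CS haven't moved; mark the debug state dirty so single-stepping is re-armed on the next VM-entry. */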
8883 }
8884
8885 /*
8886 * Clear the X86_EFL_TF if necessary.
8887 */
8888 if (pVCpu->hm.s.fClearTrapFlag)
8889 {
8890 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
8891 AssertRCReturn(rc2, rc2);
8892 pVCpu->hm.s.fClearTrapFlag = false;
8893 pCtx->eflags.Bits.u1TF = 0;
8894 }
8895    /** @todo There seem to be issues with the resume flag when the monitor trap
8896     *        flag is pending without being used. Seen early in BIOS init when
8897     *        accessing the APIC page in protected mode. */
8898
8899 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8900 return VBOXSTRICTRC_TODO(rcStrict);
8901}
8902
8903
8904/**
8905 * Runs the guest code using VT-x.
8906 *
8907 * @returns VBox status code.
8908 * @param pVM The cross context VM structure.
8909 * @param pVCpu The cross context virtual CPU structure.
8910 * @param pCtx Pointer to the guest-CPU context.
8911 */
8912VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8913{
8914 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8915 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
8916 HMVMX_ASSERT_PREEMPT_SAFE();
8917
8918 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
8919
8920 int rc;
8921 if (!pVCpu->hm.s.fSingleInstruction)
8922 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
8923 else
8924 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
8925
8926 if (rc == VERR_EM_INTERPRETER)
8927 rc = VINF_EM_RAW_EMULATE_INSTR;
8928 else if (rc == VINF_EM_RESET)
8929 rc = VINF_EM_TRIPLE_FAULT;
8930
8931 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
8932 if (RT_FAILURE(rc2))
8933 {
8934 pVCpu->hm.s.u32HMError = rc;
8935 rc = rc2;
8936 }
8937 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
8938 return rc;
8939}
8940
8941
8942#ifndef HMVMX_USE_FUNCTION_TABLE
8943DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
8944{
8945#ifdef DEBUG_ramshankar
8946# define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
8947# define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
8948#endif
8949 int rc;
8950 switch (rcReason)
8951 {
8952 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8953 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8954 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8955 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8956 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8957 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8958 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8959 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8960 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8961 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8962 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8963 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8964 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8965 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8966 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8967 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8968 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8969 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8970 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8971 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8972 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8973 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8974 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8975 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8976 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8977 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8978 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8979 case VMX_EXIT_WBINVD: /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8980 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8981 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8982 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8983 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8984 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8985 case VMX_EXIT_VMCALL: /* SVVMCS(); */ rc = hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
8986
8987 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
8988 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
8989 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
8990 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
8991 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
8992 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
8993 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
8994 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
8995 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
8996
8997 case VMX_EXIT_VMCLEAR:
8998 case VMX_EXIT_VMLAUNCH:
8999 case VMX_EXIT_VMPTRLD:
9000 case VMX_EXIT_VMPTRST:
9001 case VMX_EXIT_VMREAD:
9002 case VMX_EXIT_VMRESUME:
9003 case VMX_EXIT_VMWRITE:
9004 case VMX_EXIT_VMXOFF:
9005 case VMX_EXIT_VMXON:
9006 case VMX_EXIT_INVEPT:
9007 case VMX_EXIT_INVVPID:
9008 case VMX_EXIT_VMFUNC:
9009 case VMX_EXIT_XSAVES:
9010 case VMX_EXIT_XRSTORS:
9011 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
9012 break;
9013 case VMX_EXIT_RESERVED_60:
9014 case VMX_EXIT_RDSEED: /* only spurious exits, so undefined */
9015 case VMX_EXIT_RESERVED_62:
9016 default:
9017 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
9018 break;
9019 }
9020 return rc;
9021}
9022#endif /* !HMVMX_USE_FUNCTION_TABLE */
9023
9024
9025/**
9026 * Single-stepping VM-exit filtering.
9027 *
9028 * This preprocesses the VM-exit and decides whether we have stepped far enough
9029 * to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit handling is
9030 * performed.
9031 *
9032 * @returns Strict VBox status code.
9033 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9034 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9035 * out-of-sync. Make sure to update the required
9036 * fields before using them.
9037 * @param pVmxTransient Pointer to the VMX-transient structure.
9038 * @param uExitReason The VM-exit reason.
9039 * @param uCsStart The CS we started executing (stepping) on.
9040 * @param uRipStart The RIP we started executing (stepping) on.
9041 */
9042DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9043 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart)
9044{
9045 switch (uExitReason)
9046 {
9047 case VMX_EXIT_XCPT_OR_NMI:
9048 {
9049 /* Check for host NMI. */
9050 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9051 AssertRCReturn(rc2, rc2);
9052 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9053 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9054 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9055 /* fall thru */
9056 }
9057
9058 case VMX_EXIT_EPT_MISCONFIG:
9059 case VMX_EXIT_TRIPLE_FAULT:
9060 case VMX_EXIT_APIC_ACCESS:
9061 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9062 case VMX_EXIT_TASK_SWITCH:
9063
9064 /* Instruction specific VM-exits: */
9065 case VMX_EXIT_IO_INSTR:
9066 case VMX_EXIT_CPUID:
9067 case VMX_EXIT_RDTSC:
9068 case VMX_EXIT_RDTSCP:
9069 case VMX_EXIT_MOV_CRX:
9070 case VMX_EXIT_MWAIT:
9071 case VMX_EXIT_MONITOR:
9072 case VMX_EXIT_RDMSR:
9073 case VMX_EXIT_WRMSR:
9074 case VMX_EXIT_MOV_DRX:
9075 case VMX_EXIT_HLT:
9076 case VMX_EXIT_INVD:
9077 case VMX_EXIT_INVLPG:
9078 case VMX_EXIT_RSM:
9079 case VMX_EXIT_PAUSE:
9080 case VMX_EXIT_XDTR_ACCESS:
9081 case VMX_EXIT_TR_ACCESS:
9082 case VMX_EXIT_WBINVD:
9083 case VMX_EXIT_XSETBV:
9084 case VMX_EXIT_RDRAND:
9085 case VMX_EXIT_INVPCID:
9086 case VMX_EXIT_GETSEC:
9087 case VMX_EXIT_RDPMC:
9088 case VMX_EXIT_VMCALL:
9089 case VMX_EXIT_VMCLEAR:
9090 case VMX_EXIT_VMLAUNCH:
9091 case VMX_EXIT_VMPTRLD:
9092 case VMX_EXIT_VMPTRST:
9093 case VMX_EXIT_VMREAD:
9094 case VMX_EXIT_VMRESUME:
9095 case VMX_EXIT_VMWRITE:
9096 case VMX_EXIT_VMXOFF:
9097 case VMX_EXIT_VMXON:
9098 case VMX_EXIT_INVEPT:
9099 case VMX_EXIT_INVVPID:
9100 case VMX_EXIT_VMFUNC:
9101 {
9102 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9103 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9104 AssertRCReturn(rc2, rc2);
9105 if ( pMixedCtx->rip != uRipStart
9106 || pMixedCtx->cs.Sel != uCsStart)
9107 return VINF_EM_DBG_STEPPED;
9108 break;
9109 }
9110 }
9111
9112 /*
9113 * Normal processing.
9114 */
9115#ifdef HMVMX_USE_FUNCTION_TABLE
9116 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9117#else
9118 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9119#endif
9120}
9121
9122
9123#ifdef VBOX_STRICT
9124/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
9125# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
9126 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
9127
9128# define HMVMX_ASSERT_PREEMPT_CPUID() \
9129 do { \
9130 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
9131 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
9132 } while (0)
9133
9134# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9135 do { \
9136 AssertPtr(pVCpu); \
9137 AssertPtr(pMixedCtx); \
9138 AssertPtr(pVmxTransient); \
9139 Assert(pVmxTransient->fVMEntryFailed == false); \
9140 Assert(ASMIntAreEnabled()); \
9141 HMVMX_ASSERT_PREEMPT_SAFE(); \
9142 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
9143 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
9144 HMVMX_ASSERT_PREEMPT_SAFE(); \
9145 if (VMMR0IsLogFlushDisabled(pVCpu)) \
9146 HMVMX_ASSERT_PREEMPT_CPUID(); \
9147 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9148 } while (0)
9149
9150# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
9151 do { \
9152 Log4Func(("\n")); \
9153 } while (0)
9154#else /* nonstrict builds: */
9155# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9156 do { \
9157 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9158 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
9159 } while (0)
9160# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
9161#endif
9162
9163
9164/**
9165 * Advances the guest RIP after reading it from the VMCS.
9166 *
9167 * @returns VBox status code.
9168 * @param pVCpu The cross context virtual CPU structure.
9169 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
9170 * out-of-sync. Make sure to update the required fields
9171 * before using them.
9172 * @param pVmxTransient Pointer to the VMX transient structure.
9173 *
9174 * @remarks No-long-jump zone!!!
9175 */
9176DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9177{
9178 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9179 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9180 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9181 AssertRCReturn(rc, rc);
9182
9183 pMixedCtx->rip += pVmxTransient->cbInstr;
9184 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
9185
9186 /*
9187 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
9188 * pending debug exception field as it takes care of priority of events.
9189 *
9190 * See Intel spec. 32.2.1 "Debug Exceptions".
9191 */
9192 if ( !pVCpu->hm.s.fSingleInstruction
9193 && pMixedCtx->eflags.Bits.u1TF)
9194 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
9195
9196 return rc;
9197}
9198
9199
9200/**
9201 * Tries to determine what part of the guest state VT-x has deemed invalid
9202 * and updates the error record fields accordingly.
9203 *
9204 * @return VMX_IGS_* return codes.
9205 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9206 * wrong with the guest state.
9207 *
9208 * @param pVM The cross context VM structure.
9209 * @param pVCpu The cross context virtual CPU structure.
9210 * @param pCtx Pointer to the guest-CPU state.
9211 *
9212 * @remarks This function assumes our cache of the VMCS controls
9213 *          is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9214 */
9215static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9216{
9217#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9218#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9219 uError = (err); \
9220 break; \
9221 } else do { } while (0)
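/* Note: the trailing 'else do { } while (0)' makes HMVMX_CHECK_BREAK demand a terminating semicolon and
   keeps it safe inside unbraced if/else constructs. */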
9222
9223 int rc;
9224 uint32_t uError = VMX_IGS_ERROR;
9225 uint32_t u32Val;
9226 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9227
9228 do
9229 {
9230 /*
9231 * CR0.
9232 */
9233 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9234 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
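        /* uSetCR0 = bits that must be 1 in CR0 (set in both FIXED0 and FIXED1); uZapCR0 = bits that are
           allowed to be 1 (everything outside it must be 0). */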
9235 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
9236 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9237 if (fUnrestrictedGuest)
9238 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
9239
9240 uint32_t u32GuestCR0;
9241 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
9242 AssertRCBreak(rc);
9243 HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
9244 HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
9245 if ( !fUnrestrictedGuest
9246 && (u32GuestCR0 & X86_CR0_PG)
9247 && !(u32GuestCR0 & X86_CR0_PE))
9248 {
9249 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9250 }
9251
9252 /*
9253 * CR4.
9254 */
9255 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9256 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9257
9258 uint32_t u32GuestCR4;
9259 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
9260 AssertRCBreak(rc);
9261 HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
9262 HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
9263
9264 /*
9265 * IA32_DEBUGCTL MSR.
9266 */
9267 uint64_t u64Val;
9268 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9269 AssertRCBreak(rc);
9270 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9271 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9272 {
9273 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9274 }
9275 uint64_t u64DebugCtlMsr = u64Val;
9276
9277#ifdef VBOX_STRICT
9278 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9279 AssertRCBreak(rc);
9280 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
9281#endif
9282 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
9283
9284 /*
9285 * RIP and RFLAGS.
9286 */
9287 uint32_t u32Eflags;
9288#if HC_ARCH_BITS == 64
9289 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9290 AssertRCBreak(rc);
9291        /* pCtx->rip can be different from the one in the VMCS (e.g. after running guest code and taking VM-exits that don't update it). */
9292 if ( !fLongModeGuest
9293 || !pCtx->cs.Attr.n.u1Long)
9294 {
9295 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9296 }
9297 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9298 * must be identical if the "IA-32e mode guest" VM-entry
9299 * control is 1 and CS.L is 1. No check applies if the
9300 * CPU supports 64 linear-address bits. */
9301
9302 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9303 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9304 AssertRCBreak(rc);
9305 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9306 VMX_IGS_RFLAGS_RESERVED);
9307 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9308 u32Eflags = u64Val;
9309#else
9310 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9311 AssertRCBreak(rc);
9312 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
9313 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9314#endif
9315
9316 if ( fLongModeGuest
9317 || ( fUnrestrictedGuest
9318 && !(u32GuestCR0 & X86_CR0_PE)))
9319 {
9320 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9321 }
9322
9323 uint32_t u32EntryInfo;
9324 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9325 AssertRCBreak(rc);
9326 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9327 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9328 {
9329 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9330 }
9331
9332 /*
9333 * 64-bit checks.
9334 */
9335#if HC_ARCH_BITS == 64
9336 if (fLongModeGuest)
9337 {
9338 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9339 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9340 }
9341
9342 if ( !fLongModeGuest
9343 && (u32GuestCR4 & X86_CR4_PCIDE))
9344 {
9345 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9346 }
9347
9348 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9349 * 51:32 beyond the processor's physical-address width are 0. */
9350
9351 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9352 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9353 {
9354 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9355 }
9356
9357 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9358 AssertRCBreak(rc);
9359 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9360
9361 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9362 AssertRCBreak(rc);
9363 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9364#endif
9365
9366 /*
9367 * PERF_GLOBAL MSR.
9368 */
9369 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
9370 {
9371 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9372 AssertRCBreak(rc);
9373 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9374 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9375 }
9376
9377 /*
9378 * PAT MSR.
9379 */
9380 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
9381 {
9382 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9383 AssertRCBreak(rc);
9384 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
9385 for (unsigned i = 0; i < 8; i++)
9386 {
9387 uint8_t u8Val = (u64Val & 0xff);
9388 if ( u8Val != 0 /* UC */
9389 && u8Val != 1 /* WC */
9390 && u8Val != 4 /* WT */
9391 && u8Val != 5 /* WP */
9392 && u8Val != 6 /* WB */
9393 && u8Val != 7 /* UC- */)
9394 {
9395 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9396 }
9397 u64Val >>= 8;
9398 }
9399 }
9400
9401 /*
9402 * EFER MSR.
9403 */
9404 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
9405 {
9406 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9407 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9408 AssertRCBreak(rc);
9409 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9410 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9411 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVCpu->hm.s.vmx.u32EntryCtls
9412 & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
9413 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9414 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9415 || !(u32GuestCR0 & X86_CR0_PG)
9416 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
9417 VMX_IGS_EFER_LMA_LME_MISMATCH);
9418 }
9419
9420 /*
9421 * Segment registers.
9422 */
9423 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9424 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9425 if (!(u32Eflags & X86_EFL_VM))
9426 {
9427 /* CS */
9428 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9429 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9430 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9431 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9432 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9433 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9434 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9435 /* CS cannot be loaded with NULL in protected mode. */
9436 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9437 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9438 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9439 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9440 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9441 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9442 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9443 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9444 else
9445 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9446
9447 /* SS */
9448 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9449 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9450 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9451 if ( !(pCtx->cr0 & X86_CR0_PE)
9452 || pCtx->cs.Attr.n.u4Type == 3)
9453 {
9454 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9455 }
9456 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9457 {
9458 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9459 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9460 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9461 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9462 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9463 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9464 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9465 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9466 }
9467
9468 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
9469 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9470 {
9471 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9472 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9473 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9474 || pCtx->ds.Attr.n.u4Type > 11
9475 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9476 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9477 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9478 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9479 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9480 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9481 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9482 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9483 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9484 }
9485 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9486 {
9487 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9488 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9489 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9490 || pCtx->es.Attr.n.u4Type > 11
9491 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9492 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9493 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9494 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9495 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9496 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9497 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9498 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9499 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9500 }
9501 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9502 {
9503 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9504 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9505 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9506 || pCtx->fs.Attr.n.u4Type > 11
9507 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9508 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9509 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9510 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9511 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9512 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9513 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9514 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9515 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9516 }
9517 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9518 {
9519 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9520 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9521 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9522 || pCtx->gs.Attr.n.u4Type > 11
9523 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9524 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9525 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9526 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9527 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9528 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9529 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9530 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9531 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9532 }
9533 /* 64-bit capable CPUs. */
9534#if HC_ARCH_BITS == 64
9535 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9536 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9537 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9538 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9539 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9540 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9541 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9542 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9543 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9544 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9545 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9546#endif
9547 }
9548 else
9549 {
9550 /* V86 mode checks. */
9551 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
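            /* When real-on-v86 is active the VMCS holds the fixed v86 attributes (0xf3 = present, DPL 3,
               accessed read/write data segment) rather than what pCtx contains. */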
9552 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9553 {
9554 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9555 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9556 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9557 }
9558 else
9559 {
9560 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9561 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9562 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9563 }
9564
9565 /* CS */
9566 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9567 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9568 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9569 /* SS */
9570 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9571 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9572 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9573 /* DS */
9574 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9575 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9576 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9577 /* ES */
9578 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9579 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9580 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9581 /* FS */
9582 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9583 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9584 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9585 /* GS */
9586 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9587 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9588 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9589 /* 64-bit capable CPUs. */
9590#if HC_ARCH_BITS == 64
9591 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9592 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9593 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9594 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9595 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9596 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9597 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9598 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9599 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9600 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9601 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9602#endif
9603 }
9604
9605 /*
9606 * TR.
9607 */
9608 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9609 /* 64-bit capable CPUs. */
9610#if HC_ARCH_BITS == 64
9611 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9612#endif
9613 if (fLongModeGuest)
9614 {
9615 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9616 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9617 }
9618 else
9619 {
9620 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9621 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9622 VMX_IGS_TR_ATTR_TYPE_INVALID);
9623 }
9624 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9625 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9626 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9627 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9628 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9629 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9630 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9631 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9632
9633 /*
9634 * GDTR and IDTR.
9635 */
9636#if HC_ARCH_BITS == 64
9637 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9638 AssertRCBreak(rc);
9639 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9640
9641 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9642 AssertRCBreak(rc);
9643 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9644#endif
9645
9646 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9647 AssertRCBreak(rc);
9648 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9649
9650 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9651 AssertRCBreak(rc);
9652 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9653
9654 /*
9655 * Guest Non-Register State.
9656 */
9657 /* Activity State. */
9658 uint32_t u32ActivityState;
9659 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9660 AssertRCBreak(rc);
9661 HMVMX_CHECK_BREAK( !u32ActivityState
9662 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
9663 VMX_IGS_ACTIVITY_STATE_INVALID);
9664 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9665 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9666 uint32_t u32IntrState;
9667 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
9668 AssertRCBreak(rc);
9669 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
9670 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9671 {
9672 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9673 }
9674
9675 /** @todo Activity state and injecting interrupts. Left as a todo since we
9676         *        currently don't use any activity state other than ACTIVE. */
9677
9678 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9679 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9680
9681 /* Guest interruptibility-state. */
9682 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9683 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9684 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
9685 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9686 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9687 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9688 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9689 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9690 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9691 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
9692 {
9693 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9694 {
9695 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9696 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9697 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9698 }
9699 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9700 {
9701 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9702 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9703 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9704 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9705 }
9706 }
9707 /** @todo Assumes the processor is not in SMM. */
9708 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9709 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9710 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9711 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9712 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9713 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
9714 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9715 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9716 {
9717 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
9718 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9719 }
9720
9721 /* Pending debug exceptions. */
9722#if HC_ARCH_BITS == 64
9723 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
9724 AssertRCBreak(rc);
9725 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9726 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9727        u32Val = u64Val;    /* For the pending debug exception checks below. */
9728#else
9729 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
9730 AssertRCBreak(rc);
9731 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9732 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
9733#endif
9734
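        /* With interrupts blocked by STI/MOV SS, or the guest halted, the pending-debug-exception BS bit
           must be consistent with EFLAGS.TF and IA32_DEBUGCTL.BTF. */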
9735 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9736 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
9737 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9738 {
9739 if ( (u32Eflags & X86_EFL_TF)
9740 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9741 {
9742 /* Bit 14 is PendingDebug.BS. */
9743 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9744 }
9745 if ( !(u32Eflags & X86_EFL_TF)
9746 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9747 {
9748 /* Bit 14 is PendingDebug.BS. */
9749 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9750 }
9751 }
9752
9753 /* VMCS link pointer. */
9754 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
9755 AssertRCBreak(rc);
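        /* An all-ones link pointer means no VMCS is linked; only validate it otherwise. */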
9756 if (u64Val != UINT64_C(0xffffffffffffffff))
9757 {
9758 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
9759 /** @todo Bits beyond the processor's physical-address width MBZ. */
9760 /** @todo 32-bit located in memory referenced by value of this field (as a
9761 * physical address) must contain the processor's VMCS revision ID. */
9762 /** @todo SMM checks. */
9763 }
9764
9765 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
9766 * not using Nested Paging? */
9767 if ( pVM->hm.s.fNestedPaging
9768 && !fLongModeGuest
9769 && CPUMIsGuestInPAEModeEx(pCtx))
9770 {
9771 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
9772 AssertRCBreak(rc);
9773 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9774
9775 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
9776 AssertRCBreak(rc);
9777 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9778
9779 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
9780 AssertRCBreak(rc);
9781 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9782
9783 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
9784 AssertRCBreak(rc);
9785 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9786 }
9787
9788 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
9789 if (uError == VMX_IGS_ERROR)
9790 uError = VMX_IGS_REASON_NOT_FOUND;
9791 } while (0);
9792
9793 pVCpu->hm.s.u32HMError = uError;
9794 return uError;
9795
9796#undef HMVMX_ERROR_BREAK
9797#undef HMVMX_CHECK_BREAK
9798}
9799
9800/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9801/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
9802/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
9803
9804/** @name VM-exit handlers.
9805 * @{
9806 */
9807
9808/**
9809 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
9810 */
9811HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9812{
9813 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9814 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
9815 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
9816 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
9817 return VINF_SUCCESS;
9818 return VINF_EM_RAW_INTERRUPT;
9819}
9820
9821
9822/**
9823 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
9824 */
9825HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9826{
9827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9828 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
9829
9830 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9831 AssertRCReturn(rc, rc);
9832
9833 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9834 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
9835 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
9836 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
9837
9838 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9839 {
9840 /*
9841 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and
9842 * anything we inject is not going to cause a VM-exit directly for the event being injected.
9843 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
9844 *
9845 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
9846 */
9847 VMXDispatchHostNmi();
9848 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
9849 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9850 return VINF_SUCCESS;
9851 }
9852
9853 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9854 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9855 if (RT_UNLIKELY(rc != VINF_SUCCESS))
9856 {
9857 if (rc == VINF_HM_DOUBLE_FAULT)
9858 rc = VINF_SUCCESS;
9859 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9860 return rc;
9861 }
9862
9863 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
9864 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
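    /* Privileged software exceptions (#DB from ICEBP) and software exceptions (#BP, #OF) deliberately
       fall through to the hardware-exception dispatch below. */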
9865 switch (uIntType)
9866 {
9867 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
9868 Assert(uVector == X86_XCPT_DB);
9869 /* no break */
9870 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
9871 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
9872 /* no break */
9873 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
9874 {
9875 switch (uVector)
9876 {
9877 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
9878 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
9879 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
9880 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
9881 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
9882 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
9883 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient); break;
9884#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9885 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
9886 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9887 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
9888 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9889 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
9890 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9891 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
9892 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9893 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
9894 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9895 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
9896 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
9897#endif
9898 default:
9899 {
9900 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9901 AssertRCReturn(rc, rc);
9902
9903 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
9904 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9905 {
9906 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
9907 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
9908 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
9909
9910 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9911 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9912 AssertRCReturn(rc, rc);
9913 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
9914 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
9915 0 /* GCPtrFaultAddress */);
9916 AssertRCReturn(rc, rc);
9917 }
9918 else
9919 {
9920 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
9921 pVCpu->hm.s.u32HMError = uVector;
9922 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
9923 }
9924 break;
9925 }
9926 }
9927 break;
9928 }
9929
9930 default:
9931 {
9932 pVCpu->hm.s.u32HMError = uExitIntInfo;
9933 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
9934 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
9935 break;
9936 }
9937 }
9938 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
9939 return rc;
9940}
9941
9942
9943/**
9944 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
9945 */
9946HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9947{
9948 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9949
9950    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
9951 hmR0VmxClearIntWindowExitVmcs(pVCpu);
9952
9953 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
9954 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
9955 return VINF_SUCCESS;
9956}
9957
9958
9959/**
9960 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
9961 */
9962HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9963{
9964 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9965 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
9966 {
9967 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
9968 HMVMX_RETURN_UNEXPECTED_EXIT();
9969 }
9970
9971 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
9972
9973 /*
9974 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
9975 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
9976 */
9977 uint32_t uIntrState = 0;
9978 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
9979 AssertRCReturn(rc, rc);
9980
9981 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
9982 if ( fBlockSti
9983 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
9984 {
9985 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
9986 }
9987
9988    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
9989 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
9990
9991 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
9992 return VINF_SUCCESS;
9993}
9994
9995
9996/**
9997 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
9998 */
9999HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10000{
10001 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10002 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
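    /* Nothing is propagated to the host here; the guest's WBINVD is simply counted and skipped by advancing RIP. */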
10003 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10004}
10005
10006
10007/**
10008 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
10009 */
10010HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10011{
10012 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10013 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
10014 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10015}
10016
10017
10018/**
10019 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
10020 */
10021HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10022{
10023 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10024 PVM pVM = pVCpu->CTX_SUFF(pVM);
10025 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10026 if (RT_LIKELY(rc == VINF_SUCCESS))
10027 {
10028 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
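        /* CPUID is a 2-byte instruction (0F A2), hence the fixed instruction length below. */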
10029 Assert(pVmxTransient->cbInstr == 2);
10030 }
10031 else
10032 {
10033 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
10034 rc = VERR_EM_INTERPRETER;
10035 }
10036 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
10037 return rc;
10038}
10039
10040
10041/**
10042 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
10043 */
10044HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10045{
10046 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10047 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10048 AssertRCReturn(rc, rc);
10049
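    /* GETSEC is only meaningful with CR4.SMXE set, in which case we let ring-3 emulate it; with SMXE clear
       the guest should have received a #UD rather than this VM-exit (see the assertion below). */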
10050 if (pMixedCtx->cr4 & X86_CR4_SMXE)
10051 return VINF_EM_RAW_EMULATE_INSTR;
10052
10053 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
10054 HMVMX_RETURN_UNEXPECTED_EXIT();
10055}
10056
10057
10058/**
10059 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10060 */
10061HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10062{
10063 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10064 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10065 AssertRCReturn(rc, rc);
10066
10067 PVM pVM = pVCpu->CTX_SUFF(pVM);
10068 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10069 if (RT_LIKELY(rc == VINF_SUCCESS))
10070 {
10071 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10072 Assert(pVmxTransient->cbInstr == 2);
10073 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10074 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10075 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10076 }
10077 else
10078 rc = VERR_EM_INTERPRETER;
10079 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10080 return rc;
10081}
10082
10083
10084/**
10085 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10086 */
10087HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10088{
10089 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10090 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10091 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
10092 AssertRCReturn(rc, rc);
10093
10094 PVM pVM = pVCpu->CTX_SUFF(pVM);
10095 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
10096 if (RT_LIKELY(rc == VINF_SUCCESS))
10097 {
10098 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
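        /* RDTSCP is a 3-byte instruction (0F 01 F9), hence the fixed instruction length below. */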
10099 Assert(pVmxTransient->cbInstr == 3);
10100 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10101 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10102 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10103 }
10104 else
10105 {
10106 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
10107 rc = VERR_EM_INTERPRETER;
10108 }
10109 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10110 return rc;
10111}
10112
10113
10114/**
10115 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10116 */
10117HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10118{
10119 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10120 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10121 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
10122 AssertRCReturn(rc, rc);
10123
10124 PVM pVM = pVCpu->CTX_SUFF(pVM);
10125 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10126 if (RT_LIKELY(rc == VINF_SUCCESS))
10127 {
10128 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10129 Assert(pVmxTransient->cbInstr == 2);
10130 }
10131 else
10132 {
10133 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
10134 rc = VERR_EM_INTERPRETER;
10135 }
10136 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
10137 return rc;
10138}
10139
10140
10141/**
10142 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
10143 */
10144HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10145{
10146 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10147 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
10148
10149 if (pVCpu->hm.s.fHypercallsEnabled)
10150 {
10151#if 0
10152 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10153 AssertRCReturn(rc, rc);
10154#else
10155 /* Aggressive state sync. for now. */
10156 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10157 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). */
10158#endif
10159 rc |= hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10160 AssertRCReturn(rc, rc);
10161
10162    /** @todo Pre-incrementing RIP before the hypercall will break when we have to implement
10163 * continuing hypercalls (e.g. Hyper-V). */
10164 rc = GIMHypercall(pVCpu, pMixedCtx);
10165 /* If the hypercall changes anything other than guest general-purpose registers,
10166 we would need to reload the guest changed bits here before VM-entry. */
10167 return rc;
10168 }
10169 else
10170 {
10171 Log4(("hmR0VmxExitVmcall: Hypercalls not enabled\n"));
10172 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10173 }
10174
10175 return VINF_SUCCESS;
10176}
10177
10178
10179/**
10180 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10181 */
10182HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10183{
10184 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10185 PVM pVM = pVCpu->CTX_SUFF(pVM);
10186 Assert(!pVM->hm.s.fNestedPaging);
10187
10188 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10189 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10190 AssertRCReturn(rc, rc);
10191
10192 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
10193 rc = VBOXSTRICTRC_VAL(rc2);
10194 if (RT_LIKELY(rc == VINF_SUCCESS))
10195 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10196 else
10197 {
10198 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
10199 pVmxTransient->uExitQualification, rc));
10200 }
10201 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
10202 return rc;
10203}
10204
10205
10206/**
10207 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10208 */
10209HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10210{
10211 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10212 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10213 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10214 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10215 AssertRCReturn(rc, rc);
10216
10217 PVM pVM = pVCpu->CTX_SUFF(pVM);
10218 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10219 if (RT_LIKELY(rc == VINF_SUCCESS))
10220 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10221 else
10222 {
10223 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
10224 rc = VERR_EM_INTERPRETER;
10225 }
10226 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
10227 return rc;
10228}
10229
10230
10231/**
10232 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10233 */
10234HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10235{
10236 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10237 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10238 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10239 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10240 AssertRCReturn(rc, rc);
10241
10242 PVM pVM = pVCpu->CTX_SUFF(pVM);
10243 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10244 rc = VBOXSTRICTRC_VAL(rc2);
10245 if (RT_LIKELY( rc == VINF_SUCCESS
10246 || rc == VINF_EM_HALT))
10247 {
10248 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10249 AssertRCReturn(rc3, rc3);
10250
10251 if ( rc == VINF_EM_HALT
10252 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
10253 {
10254 rc = VINF_SUCCESS;
10255 }
10256 }
10257 else
10258 {
10259 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
10260 rc = VERR_EM_INTERPRETER;
10261 }
10262 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
10263 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
10264 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
10265 return rc;
10266}
10267
10268
10269/**
10270 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
10271 */
10272HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10273{
10274 /*
10275 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
10276 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
10277 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
10278 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
10279 */
10280 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10281 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10282 HMVMX_RETURN_UNEXPECTED_EXIT();
10283}
10284
10285
10286/**
10287 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
10288 */
10289HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10290{
10291 /*
10292 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
10293 * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL
10294 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
10295 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
10296 */
10297 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10298 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10299 HMVMX_RETURN_UNEXPECTED_EXIT();
10300}
10301
10302
10303/**
10304 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
10305 */
10306HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10307{
10308 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
10309 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10310 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10311 HMVMX_RETURN_UNEXPECTED_EXIT();
10312}
10313
10314
10315/**
10316 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
10317 */
10318HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10319{
10320 /*
10321 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
10322 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
10323 * See Intel spec. 25.3 "Other Causes of VM-exits".
10324 */
10325 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10326 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10327 HMVMX_RETURN_UNEXPECTED_EXIT();
10328}
10329
10330
10331/**
10332 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
10333 * VM-exit.
10334 */
10335HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10336{
10337 /*
10338 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
10339 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
10340 *
10341 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
10342 * See Intel spec. "23.8 Restrictions on VMX operation".
10343 */
10344 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10345 return VINF_SUCCESS;
10346}
10347
10348
10349/**
10350 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
10351 * VM-exit.
10352 */
10353HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10354{
10355 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10356 return VINF_EM_RESET;
10357}
10358
10359
10360/**
10361 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10362 */
10363HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10364{
10365 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10366 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
10367 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10368 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10369 AssertRCReturn(rc, rc);
10370
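    /* HLT is a single-byte opcode (F4), so simply advance RIP by one instead of reading the instruction length from the VMCS. */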
10371 pMixedCtx->rip++;
10372 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10373 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
10374 rc = VINF_SUCCESS;
10375 else
10376 rc = VINF_EM_HALT;
10377
10378 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10379 if (rc != VINF_SUCCESS)
10380 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
10381 return rc;
10382}
10383
10384
10385/**
10386 * VM-exit handler for instructions that result in a \#UD exception delivered to
10387 * the guest.
10388 */
10389HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10390{
10391 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10392 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10393 return VINF_SUCCESS;
10394}
10395
10396
10397/**
10398 * VM-exit handler for expiry of the VMX preemption timer.
10399 */
10400HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10401{
10402 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10403
10404 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
10405 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10406
10407 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
10408 PVM pVM = pVCpu->CTX_SUFF(pVM);
10409 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
10410 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
10411 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
10412}
10413
10414
10415/**
10416 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
10417 */
10418HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10419{
10420 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10421
10422 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10423 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
10424 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10425 AssertRCReturn(rc, rc);
10426
10427 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
10428 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
10429
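    /* Re-evaluate whether the guest XCR0 needs to be loaded/saved around guest execution now that XSETBV may have changed it. */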
10430 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
10431
10432 return VBOXSTRICTRC_TODO(rcStrict);
10433}
10434
10435
10436/**
10437 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10438 */
10439HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10440{
10441 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10442
10443    /* The guest should not invalidate the host CPU's TLBs; fall back to the interpreter. */
10444 /** @todo implement EMInterpretInvpcid() */
10445 return VERR_EM_INTERPRETER;
10446}
10447
10448
10449/**
10450 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
10451 * Error VM-exit.
10452 */
10453HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10454{
10455 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10456 AssertRCReturn(rc, rc);
10457
10458 rc = hmR0VmxCheckVmcsCtls(pVCpu);
10459 AssertRCReturn(rc, rc);
10460
10461 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10462 NOREF(uInvalidReason);
10463
10464#ifdef VBOX_STRICT
10465 uint32_t uIntrState;
10466 RTHCUINTREG uHCReg;
10467 uint64_t u64Val;
10468 uint32_t u32Val;
10469
10470 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
10471 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
10472 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
10473 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10474 AssertRCReturn(rc, rc);
10475
10476 Log4(("uInvalidReason %u\n", uInvalidReason));
10477 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
10478 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
10479 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
10480 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
10481
10482 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
10483 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
10484 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
10485 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
10486 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
10487    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
10488 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
10489 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
10490 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
10491 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
10492 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
10493 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
10494#else
10495 NOREF(pVmxTransient);
10496#endif
10497
10498 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10499 return VERR_VMX_INVALID_GUEST_STATE;
10500}
10501
10502
10503/**
10504 * VM-exit handler for VM-entry failure due to an MSR-load
10505 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
10506 */
10507HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10508{
10509 NOREF(pVmxTransient);
10510 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10511 HMVMX_RETURN_UNEXPECTED_EXIT();
10512}
10513
10514
10515/**
10516 * VM-exit handler for VM-entry failure due to a machine-check event
10517 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
10518 */
10519HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10520{
10521 NOREF(pVmxTransient);
10522 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10523 HMVMX_RETURN_UNEXPECTED_EXIT();
10524}
10525
10526
10527/**
10528 * VM-exit handler for all undefined reasons. Should never ever happen... in
10529 * theory.
10530 */
10531HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10532{
10533 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
10534 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
10535 return VERR_VMX_UNDEFINED_EXIT_CODE;
10536}
10537
10538
10539/**
10540 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
10541 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
10542 * Conditional VM-exit.
10543 */
10544HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10545{
10546 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10547
10548 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
10549 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
10550 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
10551 return VERR_EM_INTERPRETER;
10552 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10553 HMVMX_RETURN_UNEXPECTED_EXIT();
10554}
10555
10556
10557/**
10558 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
10559 */
10560HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10561{
10562 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10563
10564 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
10565 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
10566 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
10567 return VERR_EM_INTERPRETER;
10568 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10569 HMVMX_RETURN_UNEXPECTED_EXIT();
10570}
10571
10572
10573/**
10574 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10575 */
10576HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10577{
10578 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10579
10580 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
10581 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10582 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10583 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10584 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10585 {
10586 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10587 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10588 }
10589 AssertRCReturn(rc, rc);
10590 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
10591
10592#ifdef VBOX_STRICT
10593 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
10594 {
10595 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
10596 && pMixedCtx->ecx != MSR_K6_EFER)
10597 {
10598 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
10599 pMixedCtx->ecx));
10600 HMVMX_RETURN_UNEXPECTED_EXIT();
10601 }
10602# if HC_ARCH_BITS == 64
10603 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
10604 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10605 {
10606 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10607 HMVMX_RETURN_UNEXPECTED_EXIT();
10608 }
10609# endif
10610 }
10611#endif
10612
10613 PVM pVM = pVCpu->CTX_SUFF(pVM);
10614 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10615 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
10616 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
10617 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
10618 if (RT_LIKELY(rc == VINF_SUCCESS))
10619 {
10620 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10621 Assert(pVmxTransient->cbInstr == 2);
10622 }
10623 return rc;
10624}
10625
10626
10627/**
10628 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10629 */
10630HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10631{
10632 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10633 PVM pVM = pVCpu->CTX_SUFF(pVM);
10634 int rc = VINF_SUCCESS;
10635
10636 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
10637 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10638 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10639 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10640 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10641 {
10642 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10643 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10644 }
10645 AssertRCReturn(rc, rc);
10646 Log4(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
10647
10648 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10649 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
10650 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
10651
10652 if (RT_LIKELY(rc == VINF_SUCCESS))
10653 {
10654 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10655
10656 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
10657 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
10658 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
10659 {
10660 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
10661 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
10662             *  EMInterpretWrmsr() changes it. */
10663 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10664 }
10665 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
10666 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10667 else if (pMixedCtx->ecx == MSR_K6_EFER)
10668 {
10669 /*
10670 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
10671 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
10672 * the other bits as well, SCE and NXE. See @bugref{7368}.
10673 */
10674 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
10675 }
10676
10677 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
10678 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10679 {
10680 switch (pMixedCtx->ecx)
10681 {
10682 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
10683 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10684 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10685 case MSR_K8_FS_BASE: /* no break */
10686 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10687 case MSR_K6_EFER: /* already handled above */ break;
10688 default:
10689 {
10690 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10691 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10692#if HC_ARCH_BITS == 64
10693 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10694 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10695#endif
10696 break;
10697 }
10698 }
10699 }
10700#ifdef VBOX_STRICT
10701 else
10702 {
10703 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
10704 switch (pMixedCtx->ecx)
10705 {
10706 case MSR_IA32_SYSENTER_CS:
10707 case MSR_IA32_SYSENTER_EIP:
10708 case MSR_IA32_SYSENTER_ESP:
10709 case MSR_K8_FS_BASE:
10710 case MSR_K8_GS_BASE:
10711 {
10712 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10713 HMVMX_RETURN_UNEXPECTED_EXIT();
10714 }
10715
10716 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
10717 default:
10718 {
10719 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10720 {
10721 /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
10722 if (pMixedCtx->ecx != MSR_K6_EFER)
10723 {
10724 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
10725 pMixedCtx->ecx));
10726 HMVMX_RETURN_UNEXPECTED_EXIT();
10727 }
10728 }
10729
10730#if HC_ARCH_BITS == 64
10731 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10732 {
10733 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10734 HMVMX_RETURN_UNEXPECTED_EXIT();
10735 }
10736#endif
10737 break;
10738 }
10739 }
10740 }
10741#endif /* VBOX_STRICT */
10742 }
10743 return rc;
10744}
10745
10746
10747/**
10748 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10749 */
10750HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10751{
10752 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10753
10754 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
10755 return VINF_EM_RAW_INTERRUPT;
10756}
10757
10758
10759/**
10760 * VM-exit handler for when the TPR value is lowered below the specified
10761 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10762 */
10763HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10764{
10765 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10766 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
10767
10768 /*
10769     * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
10770 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
10771 * resume guest execution.
10772 */
10773 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10774 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
10775 return VINF_SUCCESS;
10776}
10777
10778
10779/**
10780 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
10781 * VM-exit.
10782 *
10783 * @retval VINF_SUCCESS when guest execution can continue.
10784 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
10785 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
10786 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
10787 * interpreter.
10788 */
10789HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10790{
10791 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10792 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
10793 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10794 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10795 AssertRCReturn(rc, rc);
10796
10797 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
10798 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
10799 PVM pVM = pVCpu->CTX_SUFF(pVM);
10800 VBOXSTRICTRC rcStrict;
10801 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
10802 switch (uAccessType)
10803 {
10804 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
10805 {
10806 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10807 AssertRCReturn(rc, rc);
10808
10809 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
10810 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
10811 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
10812 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE
10813 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10814 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
10815 {
10816 case 0: /* CR0 */
10817 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10818 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
10819 break;
10820 case 2: /* CR2 */
10821                     /* Nothing to do here, CR2 is not part of the VMCS. */
10822 break;
10823 case 3: /* CR3 */
10824 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
10825 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
10826 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
10827 break;
10828 case 4: /* CR4 */
10829 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
10830 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n",
10831 VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
10832 break;
10833 case 8: /* CR8 */
10834 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10835 /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */
10836 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10837 break;
10838 default:
10839 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
10840 break;
10841 }
10842
10843 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10844 break;
10845 }
10846
10847 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
10848 {
10849 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10850 AssertRCReturn(rc, rc);
10851
10852 Assert( !pVM->hm.s.fNestedPaging
10853 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
10854 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
10855
10856 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
10857 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
10858 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
10859
10860 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
10861 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
10862 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
10863 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10864 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
10865 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
10866 VBOXSTRICTRC_VAL(rcStrict)));
10867 break;
10868 }
10869
10870 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
10871 {
10872 AssertRCReturn(rc, rc);
10873 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
10874 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10875 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
10876 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
10877 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
10878 break;
10879 }
10880
10881 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
10882 {
10883 AssertRCReturn(rc, rc);
10884 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
10885 VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
10886 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE,
10887 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10888 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
10889 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
10890 break;
10891 }
10892
10893 default:
10894 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
10895 VERR_VMX_UNEXPECTED_EXCEPTION);
10896 }
10897
10898 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
10899 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
10900 NOREF(pVM);
10901 return VBOXSTRICTRC_TODO(rcStrict);
10902}
10903
10904
10905/**
10906 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
10907 * VM-exit.
10908 */
10909HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10910{
10911 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10912 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
10913
10914 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10915 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10916 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10917 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
10918 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
10919 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
10920 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
10921 AssertRCReturn(rc2, rc2);
10922
10923    /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
10924 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
10925 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
10926 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
10927 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
10928 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
10929 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
10930 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
10931 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
10932
10933 /* I/O operation lookup arrays. */
10934 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
10935 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
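    /* The exit qualification encodes the access width as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes); 2 is not a valid
       encoding, hence the zero entries above and the earlier assertion on uIOWidth. */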
10936
10937 VBOXSTRICTRC rcStrict;
10938 uint32_t const cbValue = s_aIOSizes[uIOWidth];
10939 uint32_t const cbInstr = pVmxTransient->cbInstr;
10940 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
10941 PVM pVM = pVCpu->CTX_SUFF(pVM);
10942 if (fIOString)
10943 {
10944#ifdef VBOX_WITH_2ND_IEM_STEP /* This used to cause Guru Meditations with the Debian 32-bit guest without NP (on ATA reads).
10945 See @bugref{5752#c158}. Should work now. */
10946 /*
10947 * INS/OUTS - I/O String instruction.
10948 *
10949 * Use instruction-information if available, otherwise fall back on
10950 * interpreting the instruction.
10951 */
10952 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
10953 fIOWrite ? 'w' : 'r'));
10954 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
10955 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
10956 {
10957 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
10958 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
10959 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10960 AssertRCReturn(rc2, rc2);
10961 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
10962 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
10963 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
10964 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
10965 if (fIOWrite)
10966 {
10967 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
10968 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
10969 }
10970 else
10971 {
10972 /*
10973 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
10974 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
10975 * See Intel Instruction spec. for "INS".
10976 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
10977 */
10978 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
10979 }
10980 }
10981 else
10982 {
10983 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
10984 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10985 AssertRCReturn(rc2, rc2);
10986 rcStrict = IEMExecOne(pVCpu);
10987 }
10988 /** @todo IEM needs to be setting these flags somehow. */
10989 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10990 fUpdateRipAlready = true;
10991#else
10992 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
10993 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
10994 if (RT_SUCCESS(rcStrict))
10995 {
10996 if (fIOWrite)
10997 {
10998 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
10999 (DISCPUMODE)pDis->uAddrMode, cbValue);
11000 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
11001 }
11002 else
11003 {
11004 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11005 (DISCPUMODE)pDis->uAddrMode, cbValue);
11006 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
11007 }
11008 }
11009 else
11010 {
11011 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict),
11012 pMixedCtx->rip));
11013 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
11014 }
11015#endif
11016 }
11017 else
11018 {
11019 /*
11020 * IN/OUT - I/O instruction.
11021 */
11022 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
11023 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
11024 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
11025 if (fIOWrite)
11026 {
11027 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
11028 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11029 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11030 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
11031 }
11032 else
11033 {
11034 uint32_t u32Result = 0;
11035 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
11036 if (IOM_SUCCESS(rcStrict))
11037 {
11038 /* Save result of I/O IN instr. in AL/AX/EAX. */
11039 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
11040 }
11041 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11042 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11043 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
11044 }
11045 }
11046
11047 if (IOM_SUCCESS(rcStrict))
11048 {
11049 if (!fUpdateRipAlready)
11050 {
11051 pMixedCtx->rip += cbInstr;
11052 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11053 }
11054
11055 /*
11056         * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault Guru Meditation while booting a Fedora 17 64-bit guest.
11057 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
11058 */
11059 if (fIOString)
11060 {
11061 /** @todo Single-step for INS/OUTS with REP prefix? */
11062 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
11063 }
11064 else if ( !fDbgStepping
11065 && fGstStepping)
11066 {
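            /* The guest executed this instruction with EFLAGS.TF set and we're not the ones stepping it,
               so make sure it gets the single-step #DB it expects on the next VM-entry. */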
11067 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
11068 }
11069
11070 /*
11071 * If any I/O breakpoints are armed, we need to check if one triggered
11072 * and take appropriate action.
11073 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
11074 */
11075 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11076 AssertRCReturn(rc2, rc2);
11077
11078 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
11079 * execution engines about whether hyper BPs and such are pending. */
11080 uint32_t const uDr7 = pMixedCtx->dr[7];
11081 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
11082 && X86_DR7_ANY_RW_IO(uDr7)
11083 && (pMixedCtx->cr4 & X86_CR4_DE))
11084 || DBGFBpIsHwIoArmed(pVM)))
11085 {
11086 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
11087
11088 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
11089 VMMRZCallRing3Disable(pVCpu);
11090 HM_DISABLE_PREEMPT();
11091
11092 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
11093
11094 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
11095 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
11096 {
11097 /* Raise #DB. */
11098 if (fIsGuestDbgActive)
11099 ASMSetDR6(pMixedCtx->dr[6]);
11100 if (pMixedCtx->dr[7] != uDr7)
11101 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11102
11103 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
11104 }
11105 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
11106 else if ( rcStrict2 != VINF_SUCCESS
11107 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
11108 rcStrict = rcStrict2;
11109
11110 HM_RESTORE_PREEMPT();
11111 VMMRZCallRing3Enable(pVCpu);
11112 }
11113 }
11114
11115#ifdef VBOX_STRICT
11116 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11117 Assert(!fIOWrite);
11118 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11119 Assert(fIOWrite);
11120 else
11121 {
11122#if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
11123 * statuses, that the VMM device and some others may return. See
11124 * IOM_SUCCESS() for guidance. */
11125 AssertMsg( RT_FAILURE(rcStrict)
11126 || rcStrict == VINF_SUCCESS
11127 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
11128 || rcStrict == VINF_EM_DBG_BREAKPOINT
11129 || rcStrict == VINF_EM_RAW_GUEST_TRAP
11130 || rcStrict == VINF_EM_RAW_TO_R3
11131 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11132#endif
11133 }
11134#endif
11135
11136 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
11137 return VBOXSTRICTRC_TODO(rcStrict);
11138}
11139
11140
11141/**
11142 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
11143 * VM-exit.
11144 */
11145HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11146{
11147 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11148
11149    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
11150 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11151 AssertRCReturn(rc, rc);
11152 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
11153 {
11154 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
11155 AssertRCReturn(rc, rc);
11156 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
11157 {
11158 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
11159
11160 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
11161 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
11162
11163 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
11164 Assert(!pVCpu->hm.s.Event.fPending);
11165 pVCpu->hm.s.Event.fPending = true;
11166 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
11167 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
11168 AssertRCReturn(rc, rc);
11169 if (fErrorCodeValid)
11170 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
11171 else
11172 pVCpu->hm.s.Event.u32ErrCode = 0;
11173 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
11174 && uVector == X86_XCPT_PF)
11175 {
11176 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
11177 }
11178
11179 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
11180 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11181 return VINF_EM_RAW_INJECT_TRPM_EVENT;
11182 }
11183 }
11184
11185 /** @todo Emulate task switch someday, currently just going back to ring-3 for
11186 * emulation. */
11187 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11188 return VERR_EM_INTERPRETER;
11189}
11190
11191
11192/**
11193 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
11194 */
11195HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11196{
11197 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
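    /* Monitor-trap-flag exits are treated as single-step completion: disarm the flag and hand control to the debugger. */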
11198 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
11199 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
11200 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11201 AssertRCReturn(rc, rc);
11202 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
11203 return VINF_EM_DBG_STEPPED;
11204}
11205
11206
11207/**
11208 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
11209 */
11210HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11211{
11212 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11213
11214 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11215 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11216 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11217 {
11218 if (rc == VINF_HM_DOUBLE_FAULT)
11219 rc = VINF_SUCCESS;
11220 return rc;
11221 }
11222
11223#if 0
11224 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
11225 * just sync the whole thing. */
11226 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11227#else
11228 /* Aggressive state sync. for now. */
11229 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11230 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11231 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11232#endif
11233 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11234 AssertRCReturn(rc, rc);
11235
11236 /* See Intel spec. Table 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
11237 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
11238 switch (uAccessType)
11239 {
11240 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
11241 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
11242 {
11243 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
11244 || VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != 0x80,
11245 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
11246
11247 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
11248 GCPhys &= PAGE_BASE_GC_MASK;
11249 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
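      /* Illustrative note, not part of the original file: with the architectural default APIC base of
         0xfee00000 and a (hypothetical) access offset of 0x300 (the ICR low register), GCPhys would come
         out as 0xfee00300, which is what gets handed to IOMMMIOPhysHandler() below. */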
11250 PVM pVM = pVCpu->CTX_SUFF(pVM);
11251 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
11252 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
11253
11254 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
11255 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
11256 CPUMCTX2CORE(pMixedCtx), GCPhys);
11257 rc = VBOXSTRICTRC_VAL(rc2);
11258 Log4(("ApicAccess rc=%d\n", rc));
11259 if ( rc == VINF_SUCCESS
11260 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11261 || rc == VERR_PAGE_NOT_PRESENT)
11262 {
11263 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11264 | HM_CHANGED_GUEST_RSP
11265 | HM_CHANGED_GUEST_RFLAGS
11266 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11267 rc = VINF_SUCCESS;
11268 }
11269 break;
11270 }
11271
11272 default:
11273 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
11274 rc = VINF_EM_RAW_EMULATE_INSTR;
11275 break;
11276 }
11277
11278 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
11279 if (rc != VINF_SUCCESS)
11280 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
11281 return rc;
11282}
11283
11284
11285/**
11286 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
11287 * VM-exit.
11288 */
11289HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11290{
11291 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11292
11293 /* We should -not- get this VM-exit if the guest's debug registers were active. */
11294 if (pVmxTransient->fWasGuestDebugStateActive)
11295 {
11296 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11297 HMVMX_RETURN_UNEXPECTED_EXIT();
11298 }
11299
11300 int rc = VERR_INTERNAL_ERROR_5;
11301 if ( !pVCpu->hm.s.fSingleInstruction
11302 && !pVmxTransient->fWasHyperDebugStateActive)
11303 {
11304 Assert(!DBGFIsStepping(pVCpu));
11305 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
11306
11307 /* Don't intercept MOV DRx any more. */
11308 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
11309 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11310 AssertRCReturn(rc, rc);
11311
11312 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11313 VMMRZCallRing3Disable(pVCpu);
11314 HM_DISABLE_PREEMPT();
11315
11316 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
11317 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
11318 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
11319
11320 HM_RESTORE_PREEMPT();
11321 VMMRZCallRing3Enable(pVCpu);
11322
11323#ifdef VBOX_WITH_STATISTICS
11324 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11325 AssertRCReturn(rc, rc);
11326 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11327 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11328 else
11329 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11330#endif
11331 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
11332 return VINF_SUCCESS;
11333 }
11334
11335 /*
11336 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
11337 * Update the segment registers and DR7 from the CPU.
11338 */
11339 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11340 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11341 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11342 AssertRCReturn(rc, rc);
11343 Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11344
11345 PVM pVM = pVCpu->CTX_SUFF(pVM);
11346 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11347 {
11348 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11349 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
11350 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
11351 if (RT_SUCCESS(rc))
11352 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11353 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11354 }
11355 else
11356 {
11357 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11358 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
11359 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
11360 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11361 }
11362
11363 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
11364 if (RT_SUCCESS(rc))
11365 {
11366 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11367 AssertRCReturn(rc2, rc2);
11368 }
11369 return rc;
11370}
11371
11372
11373/**
11374 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11375 * Conditional VM-exit.
11376 */
11377HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11378{
11379 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11380 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11381
11382 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11383 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11384 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11385 {
11386 if (rc == VINF_HM_DOUBLE_FAULT)
11387 rc = VINF_SUCCESS;
11388 return rc;
11389 }
11390
11391 RTGCPHYS GCPhys = 0;
11392 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11393
11394#if 0
11395 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11396#else
11397 /* Aggressive state sync. for now. */
11398 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11399 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11400 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11401#endif
11402 AssertRCReturn(rc, rc);
11403
11404 /*
11405 * If we succeed, resume guest execution.
11406 * If we fail to interpret the instruction because we couldn't get the guest-physical address of
11407 * the page containing the instruction via the guest's page tables (we invalidate the guest page
11408 * in the host TLB), resume execution anyway; the resulting guest page fault lets the guest handle
11409 * this weird case. See @bugref{6043}.
11410 */
11411 PVM pVM = pVCpu->CTX_SUFF(pVM);
11412 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
11413 rc = VBOXSTRICTRC_VAL(rc2);
11414 Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
11415 if ( rc == VINF_SUCCESS
11416 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11417 || rc == VERR_PAGE_NOT_PRESENT)
11418 {
11419 /* Successfully handled MMIO operation. */
11420 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11421 | HM_CHANGED_GUEST_RSP
11422 | HM_CHANGED_GUEST_RFLAGS
11423 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11424 rc = VINF_SUCCESS;
11425 }
11426 return rc;
11427}
11428
11429
11430/**
11431 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
11432 * VM-exit.
11433 */
11434HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11435{
11436 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11437 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11438
11439 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11440 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11441 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11442 {
11443 if (rc == VINF_HM_DOUBLE_FAULT)
11444 rc = VINF_SUCCESS;
11445 return rc;
11446 }
11447
11448 RTGCPHYS GCPhys = 0;
11449 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11450 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11451#if 0
11452 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11453#else
11454 /* Aggressive state sync. for now. */
11455 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11456 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11457 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11458#endif
11459 AssertRCReturn(rc, rc);
11460
11461 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
11462 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
11463
11464 RTGCUINT uErrorCode = 0;
11465 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
11466 uErrorCode |= X86_TRAP_PF_ID;
11467 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
11468 uErrorCode |= X86_TRAP_PF_RW;
11469 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
11470 uErrorCode |= X86_TRAP_PF_P;
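   /* Illustrative note, not part of the original file: a guest write hitting a not-present EPT entry,
      for instance, would have the DATA_WRITE qualification bit set but not ENTRY_PRESENT, so uErrorCode
      would be just X86_TRAP_PF_RW; this synthesised #PF-style error code is what
      PGMR0Trap0eHandlerNestedPaging() consumes below. */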
11471
11472 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
11473
11474 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
11475 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11476
11477 /* Handle the page-fault trap for the nested shadow table. */
11478 PVM pVM = pVCpu->CTX_SUFF(pVM);
11479 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
11480 TRPMResetTrap(pVCpu);
11481
11482 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
11483 if ( rc == VINF_SUCCESS
11484 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11485 || rc == VERR_PAGE_NOT_PRESENT)
11486 {
11487 /* Successfully synced our nested page tables. */
11488 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
11489 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11490 | HM_CHANGED_GUEST_RSP
11491 | HM_CHANGED_GUEST_RFLAGS);
11492 return VINF_SUCCESS;
11493 }
11494
11495 Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
11496 return rc;
11497}
11498
11499/** @} */
11500
11501/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11502/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
11503/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11504
11505/** @name VM-exit exception handlers.
11506 * @{
11507 */
11508
11509/**
11510 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
11511 */
11512static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11513{
11514 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11515 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
11516
11517 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11518 AssertRCReturn(rc, rc);
11519
11520 if (!(pMixedCtx->cr0 & X86_CR0_NE))
11521 {
11522 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
11523 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
11524
11525 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
11526 * provides the VM-exit instruction length. If this causes problems later,
11527 * disassemble the instruction like it's done on AMD-V. */
11528 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11529 AssertRCReturn(rc2, rc2);
11530 return rc;
11531 }
11532
11533 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11534 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11535 return rc;
11536}
11537
11538
11539/**
11540 * VM-exit exception handler for \#BP (Breakpoint exception).
11541 */
11542static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11543{
11544 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11545 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
11546
11547 /** @todo Try to optimize this by not saving the entire guest state unless
11548 * really needed. */
11549 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11550 AssertRCReturn(rc, rc);
11551
11552 PVM pVM = pVCpu->CTX_SUFF(pVM);
11553 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11554 if (rc == VINF_EM_RAW_GUEST_TRAP)
11555 {
11556 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11557 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11558 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11559 AssertRCReturn(rc, rc);
11560
11561 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11562 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11563 }
11564
11565 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
11566 return rc;
11567}
11568
11569
11570/**
11571 * VM-exit exception handler for \#AC (alignment check exception).
11572 */
11573static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11574{
11575 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11576
11577 /*
11578 * Re-inject it. We'll detect any nesting before getting here.
11579 */
11580 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11581 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11582 AssertRCReturn(rc, rc);
11583 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
11584
11585 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11586 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11587 return VINF_SUCCESS;
11588}
11589
11590
11591/**
11592 * VM-exit exception handler for \#DB (Debug exception).
11593 */
11594static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11595{
11596 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11597 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
11598 Log6(("XcptDB\n"));
11599
11600 /*
11601 * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
11602 * for processing.
11603 */
11604 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11605 AssertRCReturn(rc, rc);
11606
11607 /* Refer to Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
11608 uint64_t uDR6 = X86_DR6_INIT_VAL;
11609 uDR6 |= ( pVmxTransient->uExitQualification
11610 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
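   /* Illustrative note, not part of the original file: X86_DR6_INIT_VAL is the architectural DR6 init
      value 0xffff0ff0, so a (hypothetical) exit qualification of 0x4001 (B0 and BS, which use the same
      bit positions as in DR6) would yield uDR6 = 0xffff0ff0 | 0x4001 = 0xffff4ff1, i.e. breakpoint 0
      hit while single-stepping. */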
11611
11612 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
11613 if (rc == VINF_EM_RAW_GUEST_TRAP)
11614 {
11615 /*
11616 * The exception was for the guest. Update DR6, DR7.GD and
11617 * IA32_DEBUGCTL.LBR before forwarding it.
11618 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
11619 */
11620 VMMRZCallRing3Disable(pVCpu);
11621 HM_DISABLE_PREEMPT();
11622
11623 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
11624 pMixedCtx->dr[6] |= uDR6;
11625 if (CPUMIsGuestDebugStateActive(pVCpu))
11626 ASMSetDR6(pMixedCtx->dr[6]);
11627
11628 HM_RESTORE_PREEMPT();
11629 VMMRZCallRing3Enable(pVCpu);
11630
11631 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11632 AssertRCReturn(rc, rc);
11633
11634 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
11635 pMixedCtx->dr[7] &= ~X86_DR7_GD;
11636
11637 /* Paranoia. */
11638 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
11639 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
11640
11641 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
11642 AssertRCReturn(rc, rc);
11643
11644 /*
11645 * Raise #DB in the guest.
11646 *
11647 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
11648 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP and not the 'normal' #DB.
11649 * Thus it -may- trigger different handling in the CPU (like skipped DPL checks). See @bugref{6398}.
11650 *
11651 * Since ICEBP isn't documented in the Intel spec., see the AMD spec. 15.20 "Event Injection".
11652 */
11653 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11654 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11655 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11656 AssertRCReturn(rc, rc);
11657 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11658 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11659 return VINF_SUCCESS;
11660 }
11661
11662 /*
11663 * Not a guest trap, so this must be a hypervisor-related debug event.
11664 * Update DR6 in case someone is interested in it.
11665 */
11666 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
11667 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
11668 CPUMSetHyperDR6(pVCpu, uDR6);
11669
11670 return rc;
11671}
11672
11673
11674/**
11675 * VM-exit exception handler for \#NM (Device-not-available exception: floating
11676 * point exception).
11677 */
11678static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11679{
11680 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11681
11682 /* We require CR0 and EFER. EFER is always up-to-date. */
11683 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11684 AssertRCReturn(rc, rc);
11685
11686 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11687 VMMRZCallRing3Disable(pVCpu);
11688 HM_DISABLE_PREEMPT();
11689
11690 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
11691 if (pVmxTransient->fWasGuestFPUStateActive)
11692 {
11693 rc = VINF_EM_RAW_GUEST_TRAP;
11694 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11695 }
11696 else
11697 {
11698#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11699 Assert(!pVmxTransient->fWasGuestFPUStateActive);
11700#endif
11701 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11702 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
11703 }
11704
11705 HM_RESTORE_PREEMPT();
11706 VMMRZCallRing3Enable(pVCpu);
11707
11708 if (rc == VINF_SUCCESS)
11709 {
11710 /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
11711 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11712 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
11713 pVCpu->hm.s.fPreloadGuestFpu = true;
11714 }
11715 else
11716 {
11717 /* Forward #NM to the guest. */
11718 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
11719 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11720 AssertRCReturn(rc, rc);
11721 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11722 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
11723 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11724 }
11725
11726 return VINF_SUCCESS;
11727}
11728
11729
11730/**
11731 * VM-exit exception handler for \#GP (General-protection exception).
11732 *
11733 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
11734 */
11735static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11736{
11737 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11738 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
11739
11740 int rc = VERR_INTERNAL_ERROR_5;
11741 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11742 {
11743#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11744 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
11745 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11746 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11747 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11748 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11749 AssertRCReturn(rc, rc);
11750 Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
11751 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
11752 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11753 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11754 return rc;
11755#else
11756 /* We don't intercept #GP. */
11757 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
11758 NOREF(pVmxTransient);
11759 return VERR_VMX_UNEXPECTED_EXCEPTION;
11760#endif
11761 }
11762
11763 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11764 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
11765
11766 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
11767 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11768 AssertRCReturn(rc, rc);
11769
11770 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11771 uint32_t cbOp = 0;
11772 PVM pVM = pVCpu->CTX_SUFF(pVM);
11773 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
11774 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
11775 if (RT_SUCCESS(rc))
11776 {
11777 rc = VINF_SUCCESS;
11778 Assert(cbOp == pDis->cbInstr);
11779 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11780 switch (pDis->pCurInstr->uOpcode)
11781 {
11782 case OP_CLI:
11783 {
11784 pMixedCtx->eflags.Bits.u1IF = 0;
11785 pMixedCtx->eflags.Bits.u1RF = 0;
11786 pMixedCtx->rip += pDis->cbInstr;
11787 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11788 if ( !fDbgStepping
11789 && pMixedCtx->eflags.Bits.u1TF)
11790 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
11791 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
11792 break;
11793 }
11794
11795 case OP_STI:
11796 {
11797 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
11798 pMixedCtx->eflags.Bits.u1IF = 1;
11799 pMixedCtx->eflags.Bits.u1RF = 0;
11800 pMixedCtx->rip += pDis->cbInstr;
11801 if (!fOldIF)
11802 {
11803 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
11804 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
11805 }
11806 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11807 if ( !fDbgStepping
11808 && pMixedCtx->eflags.Bits.u1TF)
11809 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
11810 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
11811 break;
11812 }
11813
11814 case OP_HLT:
11815 {
11816 rc = VINF_EM_HALT;
11817 pMixedCtx->rip += pDis->cbInstr;
11818 pMixedCtx->eflags.Bits.u1RF = 0;
11819 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11820 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11821 break;
11822 }
11823
11824 case OP_POPF:
11825 {
11826 Log4(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11827 uint32_t cbParm;
11828 uint32_t uMask;
11829 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11830 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11831 {
11832 cbParm = 4;
11833 uMask = 0xffffffff;
11834 }
11835 else
11836 {
11837 cbParm = 2;
11838 uMask = 0xffff;
11839 }
11840
11841 /* Get the stack pointer & pop the contents of the stack onto Eflags. */
11842 RTGCPTR GCPtrStack = 0;
11843 X86EFLAGS Eflags;
11844 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11845 &GCPtrStack);
11846 if (RT_SUCCESS(rc))
11847 {
11848 Assert(sizeof(Eflags.u32) >= cbParm);
11849 Eflags.u32 = 0;
11850 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm, PGMACCESSORIGIN_HM));
11851 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
11852 }
11853 if (RT_FAILURE(rc))
11854 {
11855 rc = VERR_EM_INTERPRETER;
11856 break;
11857 }
11858 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
11859 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
11860 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
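         /* Illustrative note, not part of the original file: for a (hypothetical) 16-bit POPF with
            uMask = 0xffff popping the value 0x0001, only the bits in X86_EFL_POPF_BITS within that mask
            are taken from the popped image (so CF would be set and the other restorable flags cleared),
            RF is forced clear, and bits outside the mask keep their previous values. */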
11861 pMixedCtx->esp += cbParm;
11862 pMixedCtx->esp &= uMask;
11863 pMixedCtx->rip += pDis->cbInstr;
11864 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11865 | HM_CHANGED_GUEST_RSP
11866 | HM_CHANGED_GUEST_RFLAGS);
11867 /* Generate a pending-debug exception when the guest is stepping over POPF regardless of how
11868 POPF restores EFLAGS.TF. */
11869 if ( !fDbgStepping
11870 && fGstStepping)
11871 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
11872 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
11873 break;
11874 }
11875
11876 case OP_PUSHF:
11877 {
11878 uint32_t cbParm;
11879 uint32_t uMask;
11880 if (pDis->fPrefix & DISPREFIX_OPSIZE)
11881 {
11882 cbParm = 4;
11883 uMask = 0xffffffff;
11884 }
11885 else
11886 {
11887 cbParm = 2;
11888 uMask = 0xffff;
11889 }
11890
11891 /* Get the stack pointer & push the contents of eflags onto the stack. */
11892 RTGCPTR GCPtrStack = 0;
11893 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
11894 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
11895 if (RT_FAILURE(rc))
11896 {
11897 rc = VERR_EM_INTERPRETER;
11898 break;
11899 }
11900 X86EFLAGS Eflags = pMixedCtx->eflags;
11901 /* The RF & VM bits are cleared in the image stored on the stack; see the Intel instruction reference for PUSHF. */
11902 Eflags.Bits.u1RF = 0;
11903 Eflags.Bits.u1VM = 0;
11904
11905 rc = VBOXSTRICTRC_TODO(PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm, PGMACCESSORIGIN_HM));
11906 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11907 {
11908 AssertMsgFailed(("%Rrc\n", rc)); /** @todo allow strict return codes here */
11909 rc = VERR_EM_INTERPRETER;
11910 break;
11911 }
11912 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
11913 pMixedCtx->esp -= cbParm;
11914 pMixedCtx->esp &= uMask;
11915 pMixedCtx->rip += pDis->cbInstr;
11916 pMixedCtx->eflags.Bits.u1RF = 0;
11917 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11918 | HM_CHANGED_GUEST_RSP
11919 | HM_CHANGED_GUEST_RFLAGS);
11920 if ( !fDbgStepping
11921 && pMixedCtx->eflags.Bits.u1TF)
11922 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
11923 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
11924 break;
11925 }
11926
11927 case OP_IRET:
11928 {
11929 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
11930 * instruction reference. */
11931 RTGCPTR GCPtrStack = 0;
11932 uint32_t uMask = 0xffff;
11933 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11934 uint16_t aIretFrame[3];
11935 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
11936 {
11937 rc = VERR_EM_INTERPRETER;
11938 break;
11939 }
11940 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
11941 &GCPtrStack);
11942 if (RT_SUCCESS(rc))
11943 {
11944 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame),
11945 PGMACCESSORIGIN_HM));
11946 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
11947 }
11948 if (RT_FAILURE(rc))
11949 {
11950 rc = VERR_EM_INTERPRETER;
11951 break;
11952 }
11953 pMixedCtx->eip = 0;
11954 pMixedCtx->ip = aIretFrame[0];
11955 pMixedCtx->cs.Sel = aIretFrame[1];
11956 pMixedCtx->cs.ValidSel = aIretFrame[1];
11957 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
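         /* Illustrative note, not part of the original file: this is the usual real-mode rule of
            base = selector << 4, so a (hypothetical) CS value of 0x1234 popped off the IRET frame
            would give a base of 0x12340. */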
11958 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
11959 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
11960 pMixedCtx->sp += sizeof(aIretFrame);
11961 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11962 | HM_CHANGED_GUEST_SEGMENT_REGS
11963 | HM_CHANGED_GUEST_RSP
11964 | HM_CHANGED_GUEST_RFLAGS);
11965 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
11966 if ( !fDbgStepping
11967 && fGstStepping)
11968 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
11969 Log4(("IRET %#RGv to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
11970 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
11971 break;
11972 }
11973
11974 case OP_INT:
11975 {
11976 uint16_t uVector = pDis->Param1.uValue & 0xff;
11977 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
11978 /* INT clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
11979 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
11980 break;
11981 }
11982
11983 case OP_INTO:
11984 {
11985 if (pMixedCtx->eflags.Bits.u1OF)
11986 {
11987 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
11988 /* INTO clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
11989 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
11990 }
11991 else
11992 {
11993 pMixedCtx->eflags.Bits.u1RF = 0;
11994 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
11995 }
11996 break;
11997 }
11998
11999 default:
12000 {
12001 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
12002 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
12003 EMCODETYPE_SUPERVISOR);
12004 rc = VBOXSTRICTRC_VAL(rc2);
12005 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
12006 /** @todo We have to set pending-debug exceptions here when the guest is
12007 * single-stepping depending on the instruction that was interpreted. */
12008 Log4(("#GP rc=%Rrc\n", rc));
12009 break;
12010 }
12011 }
12012 }
12013 else
12014 rc = VERR_EM_INTERPRETER;
12015
12016 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
12017 ("#GP Unexpected rc=%Rrc\n", rc));
12018 return rc;
12019}
12020
12021
12022#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
12023/**
12024 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
12025 * the exception reported in the VMX transient structure back into the VM.
12026 *
12027 * @remarks Requires uExitIntInfo in the VMX transient structure to be
12028 * up-to-date.
12029 */
12030static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12031{
12032 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12033
12034 /* Re-inject the exception into the guest. This cannot be a double-fault condition; that would have been handled in
12035 hmR0VmxCheckExitDueToEventDelivery(). */
12036 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12037 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12038 AssertRCReturn(rc, rc);
12039 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
12040
12041#ifdef DEBUG_ramshankar
12042 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12043 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
12044 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pMixedCtx->cs.Sel, pMixedCtx->rip));
12045#endif
12046
12047 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12048 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12049 return VINF_SUCCESS;
12050}
12051#endif
12052
12053
12054/**
12055 * VM-exit exception handler for \#PF (Page-fault exception).
12056 */
12057static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12058{
12059 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12060 PVM pVM = pVCpu->CTX_SUFF(pVM);
12061 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12062 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12063 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12064 AssertRCReturn(rc, rc);
12065
12066#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
12067 if (pVM->hm.s.fNestedPaging)
12068 {
12069 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
12070 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
12071 {
12072 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12073 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12074 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
12075 }
12076 else
12077 {
12078 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12079 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12080 Log4(("Pending #DF due to vectoring #PF. NP\n"));
12081 }
12082 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12083 return rc;
12084 }
12085#else
12086 Assert(!pVM->hm.s.fNestedPaging);
12087 NOREF(pVM);
12088#endif
12089
12090 /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
12091 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
12092 if (pVmxTransient->fVectoringPF)
12093 {
12094 Assert(pVCpu->hm.s.Event.fPending);
12095 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12096 }
12097
12098 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12099 AssertRCReturn(rc, rc);
12100
12101 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
12102 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
12103
12104 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
12105 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
12106 (RTGCPTR)pVmxTransient->uExitQualification);
12107
12108 Log4(("#PF: rc=%Rrc\n", rc));
12109 if (rc == VINF_SUCCESS)
12110 {
12111 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */
12112 /** @todo This isn't quite right; what if the guest does lgdt with some MMIO
12113 * memory? We don't update the whole state here... */
12114 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12115 | HM_CHANGED_GUEST_RSP
12116 | HM_CHANGED_GUEST_RFLAGS
12117 | HM_CHANGED_VMX_GUEST_APIC_STATE);
12118 TRPMResetTrap(pVCpu);
12119 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
12120 return rc;
12121 }
12122
12123 if (rc == VINF_EM_RAW_GUEST_TRAP)
12124 {
12125 if (!pVmxTransient->fVectoringDoublePF)
12126 {
12127 /* It's a guest page fault and needs to be reflected to the guest. */
12128 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
12129 TRPMResetTrap(pVCpu);
12130 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
12131 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12132 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12133 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
12134 }
12135 else
12136 {
12137 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12138 TRPMResetTrap(pVCpu);
12139 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
12140 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12141 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
12142 }
12143
12144 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12145 return VINF_SUCCESS;
12146 }
12147
12148 TRPMResetTrap(pVCpu);
12149 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
12150 return rc;
12151}
12152
12153/** @} */
12154