VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@55715

Last change on this file since 55715 was 55555, checked in by vboxsync, 10 years ago

VMX: Corrected IGS check.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 512.4 KB
Line 
1/* $Id: HMVMXR0.cpp 55555 2015-04-30 13:57:59Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/x86.h>
23#include <iprt/asm-amd64-x86.h>
24#include <iprt/thread.h>
25
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/dbgf.h>
28#include <VBox/vmm/iem.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/selm.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/gim.h>
33#ifdef VBOX_WITH_REM
34# include <VBox/vmm/rem.h>
35#endif
36#include "HMInternal.h"
37#include <VBox/vmm/vm.h>
38#include "HMVMXR0.h"
39#include "dtrace/VBoxVMM.h"
40
41#ifdef DEBUG_ramshankar
42# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
43# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
44# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
45# define HMVMX_ALWAYS_CHECK_GUEST_STATE
46# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
47# define HMVMX_ALWAYS_TRAP_PF
48# define HMVMX_ALWAYS_SWAP_FPU_STATE
49# define HMVMX_ALWAYS_FLUSH_TLB
50# define HMVMX_ALWAYS_SWAP_EFER
51#endif
52
53
54/*******************************************************************************
55* Defined Constants And Macros *
56*******************************************************************************/
57#if defined(RT_ARCH_AMD64)
58# define HMVMX_IS_64BIT_HOST_MODE() (true)
59typedef RTHCUINTREG HMVMXHCUINTREG;
60#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
61extern "C" uint32_t g_fVMXIs64bitHost;
62# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
63typedef uint64_t HMVMXHCUINTREG;
64#else
65# define HMVMX_IS_64BIT_HOST_MODE() (false)
66typedef RTHCUINTREG HMVMXHCUINTREG;
67#endif
68
69/** Use the function table. */
70#define HMVMX_USE_FUNCTION_TABLE
71
72/** Determine which tagged-TLB flush handler to use. */
73#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
74#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
75#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
76#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
77
78/** @name Updated-guest-state flags.
79 * @{ */
80#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
81#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
82#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
83#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
84#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
85#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
86#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
87#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
88#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
89#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
90#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
91#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
92#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12)
93#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13)
94#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14)
95#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15)
96#define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16)
97#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17)
98#define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18)
99#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
100#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
101 | HMVMX_UPDATED_GUEST_RSP \
102 | HMVMX_UPDATED_GUEST_RFLAGS \
103 | HMVMX_UPDATED_GUEST_CR0 \
104 | HMVMX_UPDATED_GUEST_CR3 \
105 | HMVMX_UPDATED_GUEST_CR4 \
106 | HMVMX_UPDATED_GUEST_GDTR \
107 | HMVMX_UPDATED_GUEST_IDTR \
108 | HMVMX_UPDATED_GUEST_LDTR \
109 | HMVMX_UPDATED_GUEST_TR \
110 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
111 | HMVMX_UPDATED_GUEST_DEBUG \
112 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
113 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
114 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
115 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
116 | HMVMX_UPDATED_GUEST_LAZY_MSRS \
117 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
118 | HMVMX_UPDATED_GUEST_INTR_STATE \
119 | HMVMX_UPDATED_GUEST_APIC_STATE)
120/** @} */
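/*
 * Illustrative sketch, not part of the original file: the flags above are kept in a
 * per-VCPU bitmask so that a guest register is only read back from the VMCS once,
 * the first time it is needed after a VM-exit. The EXAMPLE* names below are
 * hypothetical stand-ins for the real per-VCPU state and VMREAD calls.
 */
#if 0 /* stand-alone illustration */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_UPDATED_RIP     (1u << 0)
#define EXAMPLE_UPDATED_RSP     (1u << 1)
#define EXAMPLE_UPDATED_RFLAGS  (1u << 2)

typedef struct EXAMPLECPU { uint32_t fUpdated; } EXAMPLECPU;

/* Only perform the (expensive) VMREAD when the cached copy is stale. */
static void exampleSyncRip(EXAMPLECPU *pCpu)
{
    if (!(pCpu->fUpdated & EXAMPLE_UPDATED_RIP))
    {
        /* ...VMREAD of the guest RIP would go here... */
        pCpu->fUpdated |= EXAMPLE_UPDATED_RIP;
    }
}

int main(void)
{
    EXAMPLECPU Cpu = { 0 };
    exampleSyncRip(&Cpu);   /* first call reads from the VMCS             */
    exampleSyncRip(&Cpu);   /* second call is a no-op thanks to the flags */
    printf("fUpdated=%#x\n", (unsigned)Cpu.fUpdated);
    return 0;
}
#endif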
121
122/** @name
123 * Flags to skip redundant reads of some common VMCS fields that are not part of
124 * the guest-CPU state but are in the transient structure.
125 * @{ */
126#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
127#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
128#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
129#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
130#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
131#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
132#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
133/** @} */
134
135/** @name
136 * States of the VMCS.
137 *
138 * This does not reflect all possible VMCS states but currently only those
139 * needed for maintaining the VMCS consistently even when thread-context hooks
140 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
141 * @{ */
142#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
143#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
144#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
145/** @} */
146
147/**
148 * Exception bitmap mask for real-mode guests (real-on-v86).
149 *
150 * We need to intercept all exceptions manually except:
151 * - #NM, #MF handled in hmR0VmxLoadSharedCR0().
152 * - #DB handled in hmR0VmxLoadSharedDebugState().
153 * - #PF need not be intercepted even in real-mode if we have Nested Paging
154 * support.
155 */
156#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
157 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
158 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
159 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
160 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
161 /* RT_BIT(X86_XCPT_MF) */ | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
162 | RT_BIT(X86_XCPT_XF))
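/*
 * Illustrative sketch, not part of the original file: expanding a mask shaped like
 * HMVMX_REAL_MODE_XCPT_MASK to show which vectors end up intercepted. The vector
 * numbers are the architectural ones (0=#DE, 1=#DB, 2=NMI, ..., 14=#PF, 16=#MF);
 * the mask is recomputed here rather than taken from the macro above.
 */
#if 0 /* stand-alone illustration */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Intercept every vector up to #XF(19)... */
    uint32_t fMask = 0;
    for (unsigned uVector = 0; uVector <= 19; uVector++)
        fMask |= UINT32_C(1) << uVector;

    /* ...except #DB(1), #NM(7), #PF(14) and #MF(16), which are handled elsewhere,
       and vector 15, which is reserved. */
    fMask &= ~(  (UINT32_C(1) <<  1) | (UINT32_C(1) <<  7) | (UINT32_C(1) << 14)
               | (UINT32_C(1) << 15) | (UINT32_C(1) << 16));

    for (unsigned uVector = 0; uVector <= 19; uVector++)
        printf("vector %2u: %s\n", uVector, (fMask >> uVector) & 1 ? "intercepted" : "passed through");
    return 0;
}
#endif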
163
164/**
165 * Exception bitmap mask for all contributory exceptions.
166 *
167 * Page fault is deliberately excluded here as it's conditional as to whether
168 * it's contributory or benign. Page faults are handled separately.
169 */
170#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
171 | RT_BIT(X86_XCPT_DE))
172
173/** Maximum VM-instruction error number. */
174#define HMVMX_INSTR_ERROR_MAX 28
175
176/** Profiling macro. */
177#ifdef HM_PROFILE_EXIT_DISPATCH
178# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
179# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
180#else
181# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
182# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
183#endif
184
185/** Assert that preemption is disabled or covered by thread-context hooks. */
186#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
187 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
188
189/** Assert that we haven't migrated CPUs when thread-context hooks are not
190 * used. */
191#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
192 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
193 ("Illegal migration! Entered on CPU %u Current %u\n", \
 194 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
195
196/** Helper macro for VM-exit handlers called unexpectedly. */
197#define HMVMX_RETURN_UNEXPECTED_EXIT() \
198 do { \
199 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
200 return VERR_VMX_UNEXPECTED_EXIT; \
201 } while (0)
202
203
204/*******************************************************************************
205* Structures and Typedefs *
206*******************************************************************************/
207/**
208 * VMX transient state.
209 *
210 * A state structure for holding miscellaneous information across
211 * VMX non-root operation and restored after the transition.
212 */
213typedef struct VMXTRANSIENT
214{
215 /** The host's rflags/eflags. */
216 RTCCUINTREG uEflags;
217#if HC_ARCH_BITS == 32
218 uint32_t u32Alignment0;
219#endif
220 /** The guest's TPR value used for TPR shadowing. */
221 uint8_t u8GuestTpr;
222 /** Alignment. */
223 uint8_t abAlignment0[7];
224
225 /** The basic VM-exit reason. */
226 uint16_t uExitReason;
227 /** Alignment. */
228 uint16_t u16Alignment0;
229 /** The VM-exit interruption error code. */
230 uint32_t uExitIntErrorCode;
 231 /** The VM-exit qualification. */
232 uint64_t uExitQualification;
233
234 /** The VM-exit interruption-information field. */
235 uint32_t uExitIntInfo;
236 /** The VM-exit instruction-length field. */
237 uint32_t cbInstr;
238 /** The VM-exit instruction-information field. */
239 union
240 {
241 /** Plain unsigned int representation. */
242 uint32_t u;
243 /** INS and OUTS information. */
244 struct
245 {
246 uint32_t u6Reserved0 : 7;
247 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
248 uint32_t u3AddrSize : 3;
249 uint32_t u5Reserved1 : 5;
250 /** The segment register (X86_SREG_XXX). */
251 uint32_t iSegReg : 3;
252 uint32_t uReserved2 : 14;
253 } StrIo;
254 } ExitInstrInfo;
255 /** Whether the VM-entry failed or not. */
256 bool fVMEntryFailed;
257 /** Alignment. */
258 uint8_t abAlignment1[3];
259
260 /** The VM-entry interruption-information field. */
261 uint32_t uEntryIntInfo;
262 /** The VM-entry exception error code field. */
263 uint32_t uEntryXcptErrorCode;
264 /** The VM-entry instruction length field. */
265 uint32_t cbEntryInstr;
266
267 /** IDT-vectoring information field. */
268 uint32_t uIdtVectoringInfo;
269 /** IDT-vectoring error code. */
270 uint32_t uIdtVectoringErrorCode;
271
272 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
273 uint32_t fVmcsFieldsRead;
274
275 /** Whether the guest FPU was active at the time of VM-exit. */
276 bool fWasGuestFPUStateActive;
277 /** Whether the guest debug state was active at the time of VM-exit. */
278 bool fWasGuestDebugStateActive;
279 /** Whether the hyper debug state was active at the time of VM-exit. */
280 bool fWasHyperDebugStateActive;
281 /** Whether TSC-offsetting should be setup before VM-entry. */
282 bool fUpdateTscOffsettingAndPreemptTimer;
283 /** Whether the VM-exit was caused by a page-fault during delivery of a
284 * contributory exception or a page-fault. */
285 bool fVectoringDoublePF;
286 /** Whether the VM-exit was caused by a page-fault during delivery of an
287 * external interrupt or NMI. */
288 bool fVectoringPF;
289} VMXTRANSIENT;
290AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
291AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
292AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
293AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
294AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
295/** Pointer to VMX transient state. */
296typedef VMXTRANSIENT *PVMXTRANSIENT;
297
298
299/**
300 * MSR-bitmap read permissions.
301 */
302typedef enum VMXMSREXITREAD
303{
304 /** Reading this MSR causes a VM-exit. */
305 VMXMSREXIT_INTERCEPT_READ = 0xb,
306 /** Reading this MSR does not cause a VM-exit. */
307 VMXMSREXIT_PASSTHRU_READ
308} VMXMSREXITREAD;
309/** Pointer to MSR-bitmap read permissions. */
310typedef VMXMSREXITREAD* PVMXMSREXITREAD;
311
312/**
313 * MSR-bitmap write permissions.
314 */
315typedef enum VMXMSREXITWRITE
316{
317 /** Writing to this MSR causes a VM-exit. */
318 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
319 /** Writing to this MSR does not cause a VM-exit. */
320 VMXMSREXIT_PASSTHRU_WRITE
321} VMXMSREXITWRITE;
322/** Pointer to MSR-bitmap write permissions. */
323typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
324
325
326/**
327 * VMX VM-exit handler.
328 *
329 * @returns VBox status code.
330 * @param pVCpu Pointer to the VMCPU.
331 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
332 * out-of-sync. Make sure to update the required
333 * fields before using them.
334 * @param pVmxTransient Pointer to the VMX-transient structure.
335 */
336#ifndef HMVMX_USE_FUNCTION_TABLE
337typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
338#else
339typedef DECLCALLBACK(int) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
340/** Pointer to VM-exit handler. */
341typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
342#endif
343
344
345/*******************************************************************************
346* Internal Functions *
347*******************************************************************************/
348static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
349static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
350static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
351 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
352 bool fStepping, uint32_t *puIntState);
353#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
354static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
355#endif
356#ifndef HMVMX_USE_FUNCTION_TABLE
357DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
358# define HMVMX_EXIT_DECL static int
359#else
360# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
361#endif
362DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
363 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart);
364
365/** @name VM-exit handlers.
366 * @{
367 */
368static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
369static FNVMXEXITHANDLER hmR0VmxExitExtInt;
370static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
371static FNVMXEXITHANDLER hmR0VmxExitInitSignal;
372static FNVMXEXITHANDLER hmR0VmxExitSipi;
373static FNVMXEXITHANDLER hmR0VmxExitIoSmi;
374static FNVMXEXITHANDLER hmR0VmxExitSmi;
375static FNVMXEXITHANDLER hmR0VmxExitIntWindow;
376static FNVMXEXITHANDLER hmR0VmxExitNmiWindow;
377static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
378static FNVMXEXITHANDLER hmR0VmxExitCpuid;
379static FNVMXEXITHANDLER hmR0VmxExitGetsec;
380static FNVMXEXITHANDLER hmR0VmxExitHlt;
381static FNVMXEXITHANDLER hmR0VmxExitInvd;
382static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
383static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
384static FNVMXEXITHANDLER hmR0VmxExitVmcall;
385static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
386static FNVMXEXITHANDLER hmR0VmxExitRsm;
387static FNVMXEXITHANDLER hmR0VmxExitSetPendingXcptUD;
388static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
389static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
390static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
391static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
392static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
393static FNVMXEXITHANDLER hmR0VmxExitErrInvalidGuestState;
394static FNVMXEXITHANDLER hmR0VmxExitErrMsrLoad;
395static FNVMXEXITHANDLER hmR0VmxExitErrUndefined;
396static FNVMXEXITHANDLER hmR0VmxExitMwait;
397static FNVMXEXITHANDLER hmR0VmxExitMtf;
398static FNVMXEXITHANDLER hmR0VmxExitMonitor;
399static FNVMXEXITHANDLER hmR0VmxExitPause;
400static FNVMXEXITHANDLER hmR0VmxExitErrMachineCheck;
401static FNVMXEXITHANDLER hmR0VmxExitTprBelowThreshold;
402static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
403static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
404static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
405static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
406static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
407static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
408static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
409static FNVMXEXITHANDLER hmR0VmxExitWbinvd;
410static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
411static FNVMXEXITHANDLER hmR0VmxExitRdrand;
412static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
413/** @} */
414
415static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
416static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
417static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
418static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
419static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
420static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
421#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
422static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
423#endif
424static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
425
426/*******************************************************************************
427* Global Variables *
428*******************************************************************************/
429#ifdef HMVMX_USE_FUNCTION_TABLE
430
431/**
432 * VMX_EXIT dispatch table.
433 */
434static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
435{
436 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
437 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
438 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
439 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
440 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
441 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
442 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
443 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
444 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
445 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
446 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
447 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
448 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
449 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
450 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
451 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
452 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
453 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
454 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
455 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
456 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
457 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
458 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
459 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
460 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
461 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
462 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
463 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
464 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
465 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
466 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
467 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
468 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
469 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
470 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
471 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
472 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
473 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
474 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
475 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
 476 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
 477 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
 478 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
479 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
480 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
481 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
482 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
483 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
484 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
485 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
486 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
487 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
488 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
489 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
490 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
491 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
492 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
493 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
494 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
495 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
496 /* 60 VMX_EXIT_RESERVED_60 */ hmR0VmxExitErrUndefined,
497 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
498 /* 62 VMX_EXIT_RESERVED_62 */ hmR0VmxExitErrUndefined,
499 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
500 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
501};
502#endif /* HMVMX_USE_FUNCTION_TABLE */
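/*
 * Illustrative sketch, not part of the original file: the table-driven dispatch
 * pattern used by the table above. The basic exit reason indexes directly into an
 * array of handler pointers, with out-of-range values routed to an "undefined"
 * handler. All names below are hypothetical stand-ins for the real VMX_EXIT_*
 * values and hmR0VmxExit* handlers.
 */
#if 0 /* stand-alone illustration */
#include <stdint.h>
#include <stdio.h>

typedef int (*EXAMPLEHANDLER)(uint16_t uExitReason);

static int exampleHandleCpuid(uint16_t uExitReason)     { printf("CPUID exit (%u)\n", (unsigned)uExitReason);     return 0;  }
static int exampleHandleHlt(uint16_t uExitReason)       { printf("HLT exit (%u)\n", (unsigned)uExitReason);       return 0;  }
static int exampleHandleUndefined(uint16_t uExitReason) { printf("undefined exit (%u)\n", (unsigned)uExitReason); return -1; }

#define EXAMPLE_EXIT_MAX 12
static const EXAMPLEHANDLER g_apfnExampleHandlers[EXAMPLE_EXIT_MAX + 1] =
{
    /*  0..9 */ exampleHandleUndefined, exampleHandleUndefined, exampleHandleUndefined, exampleHandleUndefined,
                exampleHandleUndefined, exampleHandleUndefined, exampleHandleUndefined, exampleHandleUndefined,
                exampleHandleUndefined, exampleHandleUndefined,
    /* 10    */ exampleHandleCpuid,
    /* 11    */ exampleHandleUndefined,
    /* 12    */ exampleHandleHlt
};

static int exampleDispatch(uint16_t uExitReason)
{
    /* Route anything the table doesn't know about to the "undefined" handler,
       like the real code does with hmR0VmxExitErrUndefined. */
    if (uExitReason > EXAMPLE_EXIT_MAX)
        return exampleHandleUndefined(uExitReason);
    return g_apfnExampleHandlers[uExitReason](uExitReason);
}

int main(void)
{
    exampleDispatch(10);  /* CPUID        */
    exampleDispatch(12);  /* HLT          */
    exampleDispatch(40);  /* out of range */
    return 0;
}
#endif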
503
504#ifdef VBOX_STRICT
505static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
506{
507 /* 0 */ "(Not Used)",
508 /* 1 */ "VMCALL executed in VMX root operation.",
509 /* 2 */ "VMCLEAR with invalid physical address.",
510 /* 3 */ "VMCLEAR with VMXON pointer.",
511 /* 4 */ "VMLAUNCH with non-clear VMCS.",
512 /* 5 */ "VMRESUME with non-launched VMCS.",
 513 /* 6 */ "VMRESUME after VMXOFF.",
514 /* 7 */ "VM-entry with invalid control fields.",
515 /* 8 */ "VM-entry with invalid host state fields.",
516 /* 9 */ "VMPTRLD with invalid physical address.",
517 /* 10 */ "VMPTRLD with VMXON pointer.",
518 /* 11 */ "VMPTRLD with incorrect revision identifier.",
519 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
520 /* 13 */ "VMWRITE to read-only VMCS component.",
521 /* 14 */ "(Not Used)",
522 /* 15 */ "VMXON executed in VMX root operation.",
523 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
 524 /* 17 */ "VM-entry with non-launched executive VMCS.",
525 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
526 /* 19 */ "VMCALL with non-clear VMCS.",
527 /* 20 */ "VMCALL with invalid VM-exit control fields.",
528 /* 21 */ "(Not Used)",
529 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
530 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
531 /* 24 */ "VMCALL with invalid SMM-monitor features.",
532 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
533 /* 26 */ "VM-entry with events blocked by MOV SS.",
534 /* 27 */ "(Not Used)",
535 /* 28 */ "Invalid operand to INVEPT/INVVPID."
536};
537#endif /* VBOX_STRICT */
538
539
540
541/**
542 * Updates the VM's last error record. If there was a VMX instruction error,
543 * reads the error data from the VMCS and updates VCPU's last error record as
544 * well.
545 *
546 * @param pVM Pointer to the VM.
547 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
548 * VERR_VMX_UNABLE_TO_START_VM or
549 * VERR_VMX_INVALID_VMCS_FIELD).
550 * @param rc The error code.
551 */
552static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
553{
554 AssertPtr(pVM);
555 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
556 || rc == VERR_VMX_UNABLE_TO_START_VM)
557 {
558 AssertPtrReturnVoid(pVCpu);
559 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
560 }
561 pVM->hm.s.lLastError = rc;
562}
563
564
565/**
566 * Reads the VM-entry interruption-information field from the VMCS into the VMX
567 * transient structure.
568 *
569 * @returns VBox status code.
570 * @param pVmxTransient Pointer to the VMX transient structure.
571 *
572 * @remarks No-long-jump zone!!!
573 */
574DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
575{
576 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
577 AssertRCReturn(rc, rc);
578 return VINF_SUCCESS;
579}
580
581
582/**
583 * Reads the VM-entry exception error code field from the VMCS into
584 * the VMX transient structure.
585 *
586 * @returns VBox status code.
587 * @param pVmxTransient Pointer to the VMX transient structure.
588 *
589 * @remarks No-long-jump zone!!!
590 */
591DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
592{
593 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
594 AssertRCReturn(rc, rc);
595 return VINF_SUCCESS;
596}
597
598
599/**
 600 * Reads the VM-entry instruction length field from the VMCS into
601 * the VMX transient structure.
602 *
603 * @returns VBox status code.
604 * @param pVmxTransient Pointer to the VMX transient structure.
605 *
606 * @remarks No-long-jump zone!!!
607 */
608DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
609{
610 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
611 AssertRCReturn(rc, rc);
612 return VINF_SUCCESS;
613}
614
615
616/**
617 * Reads the VM-exit interruption-information field from the VMCS into the VMX
618 * transient structure.
619 *
620 * @returns VBox status code.
621 * @param pVmxTransient Pointer to the VMX transient structure.
622 */
623DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
624{
625 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
626 {
627 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
628 AssertRCReturn(rc, rc);
629 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
630 }
631 return VINF_SUCCESS;
632}
633
634
635/**
636 * Reads the VM-exit interruption error code from the VMCS into the VMX
637 * transient structure.
638 *
639 * @returns VBox status code.
640 * @param pVmxTransient Pointer to the VMX transient structure.
641 */
642DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
643{
644 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
645 {
646 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
647 AssertRCReturn(rc, rc);
648 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
649 }
650 return VINF_SUCCESS;
651}
652
653
654/**
655 * Reads the VM-exit instruction length field from the VMCS into the VMX
656 * transient structure.
657 *
658 * @returns VBox status code.
 659 *
660 * @param pVmxTransient Pointer to the VMX transient structure.
661 */
662DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
663{
664 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
665 {
666 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
667 AssertRCReturn(rc, rc);
668 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
669 }
670 return VINF_SUCCESS;
671}
672
673
674/**
675 * Reads the VM-exit instruction-information field from the VMCS into
676 * the VMX transient structure.
677 *
678 * @returns VBox status code.
679 * @param pVmxTransient Pointer to the VMX transient structure.
680 */
681DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
682{
683 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
684 {
685 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
686 AssertRCReturn(rc, rc);
687 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
688 }
689 return VINF_SUCCESS;
690}
691
692
693/**
 694 * Reads the VM-exit qualification from the VMCS into the VMX transient
695 * structure.
696 *
697 * @returns VBox status code.
698 * @param pVCpu Pointer to the VMCPU (required for the VMCS cache
699 * case).
700 * @param pVmxTransient Pointer to the VMX transient structure.
701 */
702DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
703{
704 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
705 {
706 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
707 AssertRCReturn(rc, rc);
708 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
709 }
710 return VINF_SUCCESS;
711}
712
713
714/**
715 * Reads the IDT-vectoring information field from the VMCS into the VMX
716 * transient structure.
717 *
718 * @returns VBox status code.
719 * @param pVmxTransient Pointer to the VMX transient structure.
720 *
721 * @remarks No-long-jump zone!!!
722 */
723DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
724{
725 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
726 {
727 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
728 AssertRCReturn(rc, rc);
729 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
730 }
731 return VINF_SUCCESS;
732}
733
734
735/**
736 * Reads the IDT-vectoring error code from the VMCS into the VMX
737 * transient structure.
738 *
739 * @returns VBox status code.
740 * @param pVmxTransient Pointer to the VMX transient structure.
741 */
742DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
743{
744 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
745 {
746 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
747 AssertRCReturn(rc, rc);
748 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
749 }
750 return VINF_SUCCESS;
751}
752
753
754/**
755 * Enters VMX root mode operation on the current CPU.
756 *
757 * @returns VBox status code.
758 * @param pVM Pointer to the VM (optional, can be NULL, after
759 * a resume).
760 * @param HCPhysCpuPage Physical address of the VMXON region.
761 * @param pvCpuPage Pointer to the VMXON region.
762 */
763static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
764{
765 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
766 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
767 Assert(pvCpuPage);
768 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
769
770 if (pVM)
771 {
772 /* Write the VMCS revision dword to the VMXON region. */
773 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
774 }
775
776 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
777 RTCCUINTREG uEflags = ASMIntDisableFlags();
778
779 /* Enable the VMX bit in CR4 if necessary. */
780 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
781
782 /* Enter VMX root mode. */
783 int rc = VMXEnable(HCPhysCpuPage);
784 if ( RT_FAILURE(rc)
785 && !(uOldCr4 & X86_CR4_VMXE))
786 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
787
788 /* Restore interrupts. */
789 ASMSetFlags(uEflags);
790 return rc;
791}
792
793
794/**
795 * Exits VMX root mode operation on the current CPU.
796 *
797 * @returns VBox status code.
798 */
799static int hmR0VmxLeaveRootMode(void)
800{
801 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
802
 803 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
804 RTCCUINTREG uEflags = ASMIntDisableFlags();
805
806 /* If we're for some reason not in VMX root mode, then don't leave it. */
807 RTCCUINTREG uHostCR4 = ASMGetCR4();
808
809 int rc;
810 if (uHostCR4 & X86_CR4_VMXE)
811 {
812 /* Exit VMX root mode and clear the VMX bit in CR4. */
813 VMXDisable();
814 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
815 rc = VINF_SUCCESS;
816 }
817 else
818 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
819
820 /* Restore interrupts. */
821 ASMSetFlags(uEflags);
822 return rc;
823}
824
825
826/**
827 * Allocates and maps one physically contiguous page. The allocated page is
 828 * zeroed out. (Used by various VT-x structures).
829 *
830 * @returns IPRT status code.
831 * @param pMemObj Pointer to the ring-0 memory object.
832 * @param ppVirt Where to store the virtual address of the
833 * allocation.
 834 * @param pHCPhys Where to store the physical address of the
835 * allocation.
836 */
837DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
838{
839 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
840 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
841 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
842
843 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
844 if (RT_FAILURE(rc))
845 return rc;
846 *ppVirt = RTR0MemObjAddress(*pMemObj);
847 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
848 ASMMemZero32(*ppVirt, PAGE_SIZE);
849 return VINF_SUCCESS;
850}
851
852
853/**
854 * Frees and unmaps an allocated physical page.
855 *
856 * @param pMemObj Pointer to the ring-0 memory object.
857 * @param ppVirt Where to re-initialize the virtual address of
858 * allocation as 0.
859 * @param pHCPhys Where to re-initialize the physical address of the
860 * allocation as 0.
861 */
862DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
863{
864 AssertPtr(pMemObj);
865 AssertPtr(ppVirt);
866 AssertPtr(pHCPhys);
867 if (*pMemObj != NIL_RTR0MEMOBJ)
868 {
869 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
870 AssertRC(rc);
871 *pMemObj = NIL_RTR0MEMOBJ;
872 *ppVirt = 0;
873 *pHCPhys = 0;
874 }
875}
876
877
878/**
879 * Worker function to free VT-x related structures.
880 *
881 * @returns IPRT status code.
882 * @param pVM Pointer to the VM.
883 */
884static void hmR0VmxStructsFree(PVM pVM)
885{
886 for (VMCPUID i = 0; i < pVM->cCpus; i++)
887 {
888 PVMCPU pVCpu = &pVM->aCpus[i];
889 AssertPtr(pVCpu);
890
891 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
892 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
893
894 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
895 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
896
897 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
898 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
899 }
900
901 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
902#ifdef VBOX_WITH_CRASHDUMP_MAGIC
903 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
904#endif
905}
906
907
908/**
909 * Worker function to allocate VT-x related VM structures.
910 *
911 * @returns IPRT status code.
912 * @param pVM Pointer to the VM.
913 */
914static int hmR0VmxStructsAlloc(PVM pVM)
915{
916 /*
917 * Initialize members up-front so we can cleanup properly on allocation failure.
918 */
919#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
920 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
921 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
922 pVM->hm.s.vmx.HCPhys##a_Name = 0;
923
924#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
925 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
926 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
927 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
928
929#ifdef VBOX_WITH_CRASHDUMP_MAGIC
930 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
931#endif
932 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
933
934 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
935 for (VMCPUID i = 0; i < pVM->cCpus; i++)
936 {
937 PVMCPU pVCpu = &pVM->aCpus[i];
938 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
939 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
940 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
941 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
942 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
943 }
944#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
945#undef VMXLOCAL_INIT_VM_MEMOBJ
946
947 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
948 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
949 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
950 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
951
952 /*
953 * Allocate all the VT-x structures.
954 */
955 int rc = VINF_SUCCESS;
956#ifdef VBOX_WITH_CRASHDUMP_MAGIC
957 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
958 if (RT_FAILURE(rc))
959 goto cleanup;
960 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
961 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
962#endif
963
964 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
965 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
966 {
967 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
968 &pVM->hm.s.vmx.HCPhysApicAccess);
969 if (RT_FAILURE(rc))
970 goto cleanup;
971 }
972
973 /*
974 * Initialize per-VCPU VT-x structures.
975 */
976 for (VMCPUID i = 0; i < pVM->cCpus; i++)
977 {
978 PVMCPU pVCpu = &pVM->aCpus[i];
979 AssertPtr(pVCpu);
980
981 /* Allocate the VM control structure (VMCS). */
982 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
983 if (RT_FAILURE(rc))
984 goto cleanup;
985
986 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
987 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
988 {
989 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
990 &pVCpu->hm.s.vmx.HCPhysVirtApic);
991 if (RT_FAILURE(rc))
992 goto cleanup;
993 }
994
995 /*
996 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
997 * transparent accesses of specific MSRs.
998 *
999 * If the condition for enabling MSR bitmaps changes here, don't forget to
1000 * update HMIsMsrBitmapsAvailable().
1001 */
1002 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1003 {
1004 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1005 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1006 if (RT_FAILURE(rc))
1007 goto cleanup;
1008 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1009 }
1010
1011 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1012 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1013 if (RT_FAILURE(rc))
1014 goto cleanup;
1015
1016 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1017 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1018 if (RT_FAILURE(rc))
1019 goto cleanup;
1020 }
1021
1022 return VINF_SUCCESS;
1023
1024cleanup:
1025 hmR0VmxStructsFree(pVM);
1026 return rc;
1027}
1028
1029
1030/**
1031 * Does global VT-x initialization (called during module initialization).
1032 *
1033 * @returns VBox status code.
1034 */
1035VMMR0DECL(int) VMXR0GlobalInit(void)
1036{
1037#ifdef HMVMX_USE_FUNCTION_TABLE
1038 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1039# ifdef VBOX_STRICT
1040 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1041 Assert(g_apfnVMExitHandlers[i]);
1042# endif
1043#endif
1044 return VINF_SUCCESS;
1045}
1046
1047
1048/**
1049 * Does global VT-x termination (called during module termination).
1050 */
1051VMMR0DECL(void) VMXR0GlobalTerm()
1052{
1053 /* Nothing to do currently. */
1054}
1055
1056
1057/**
1058 * Sets up and activates VT-x on the current CPU.
1059 *
1060 * @returns VBox status code.
1061 * @param pCpu Pointer to the global CPU info struct.
1062 * @param pVM Pointer to the VM (can be NULL after a host resume
1063 * operation).
1064 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1065 * fEnabledByHost is true).
1066 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1067 * @a fEnabledByHost is true).
1068 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1069 * enable VT-x on the host.
1070 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1071 */
1072VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1073 void *pvMsrs)
1074{
1075 Assert(pCpu);
1076 Assert(pvMsrs);
1077 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1078
1079 /* Enable VT-x if it's not already enabled by the host. */
1080 if (!fEnabledByHost)
1081 {
1082 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1083 if (RT_FAILURE(rc))
1084 return rc;
1085 }
1086
1087 /*
 1088 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
1089 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
1090 */
1091 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1092 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1093 {
1094 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1095 pCpu->fFlushAsidBeforeUse = false;
1096 }
1097 else
1098 pCpu->fFlushAsidBeforeUse = true;
1099
1100 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1101 ++pCpu->cTlbFlushes;
1102
1103 return VINF_SUCCESS;
1104}
1105
1106
1107/**
1108 * Deactivates VT-x on the current CPU.
1109 *
1110 * @returns VBox status code.
1111 * @param pCpu Pointer to the global CPU info struct.
1112 * @param pvCpuPage Pointer to the VMXON region.
1113 * @param HCPhysCpuPage Physical address of the VMXON region.
1114 *
1115 * @remarks This function should never be called when SUPR0EnableVTx() or
1116 * similar was used to enable VT-x on the host.
1117 */
1118VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1119{
1120 NOREF(pCpu);
1121 NOREF(pvCpuPage);
1122 NOREF(HCPhysCpuPage);
1123
1124 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1125 return hmR0VmxLeaveRootMode();
1126}
1127
1128
1129/**
1130 * Sets the permission bits for the specified MSR in the MSR bitmap.
1131 *
1132 * @param pVCpu Pointer to the VMCPU.
 1133 * @param uMsr The MSR value.
1134 * @param enmRead Whether reading this MSR causes a VM-exit.
1135 * @param enmWrite Whether writing this MSR causes a VM-exit.
1136 */
1137static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1138{
1139 int32_t iBit;
1140 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1141
1142 /*
1143 * Layout:
1144 * 0x000 - 0x3ff - Low MSR read bits
1145 * 0x400 - 0x7ff - High MSR read bits
1146 * 0x800 - 0xbff - Low MSR write bits
1147 * 0xc00 - 0xfff - High MSR write bits
1148 */
1149 if (uMsr <= 0x00001FFF)
1150 iBit = uMsr;
1151 else if ( uMsr >= 0xC0000000
1152 && uMsr <= 0xC0001FFF)
1153 {
1154 iBit = (uMsr - 0xC0000000);
1155 pbMsrBitmap += 0x400;
1156 }
1157 else
1158 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1159
1160 Assert(iBit <= 0x1fff);
1161 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1162 ASMBitSet(pbMsrBitmap, iBit);
1163 else
1164 ASMBitClear(pbMsrBitmap, iBit);
1165
1166 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1167 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1168 else
1169 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1170}
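/*
 * Illustrative sketch, not part of the original file: computing where a given MSR
 * lands in the 4KB MSR bitmap, following the layout documented in the function
 * above (low MSRs 0x00000000..0x00001FFF, high MSRs 0xC0000000..0xC0001FFF, read
 * bits in the first 2KB, write bits in the second 2KB). exampleMsrBitmapBit() is
 * a hypothetical helper name.
 */
#if 0 /* stand-alone illustration */
#include <stdint.h>
#include <stdio.h>

static int exampleMsrBitmapBit(uint32_t uMsr, uint32_t *poffBase)
{
    if (uMsr <= UINT32_C(0x00001FFF))
    {
        *poffBase = 0x000;                 /* low MSR read bits start here  */
        return (int)uMsr;
    }
    if (uMsr >= UINT32_C(0xC0000000) && uMsr <= UINT32_C(0xC0001FFF))
    {
        *poffBase = 0x400;                 /* high MSR read bits start here */
        return (int)(uMsr - UINT32_C(0xC0000000));
    }
    return -1;                             /* not covered by the bitmap     */
}

int main(void)
{
    uint32_t offBase;
    int iBit = exampleMsrBitmapBit(UINT32_C(0xC0000082) /* IA32_LSTAR */, &offBase);
    if (iBit >= 0)
    {
        printf("read  intercept bit: byte %#x, bit %u\n", (unsigned)(offBase + (unsigned)iBit / 8), (unsigned)iBit % 8);
        printf("write intercept bit: byte %#x, bit %u\n", (unsigned)(offBase + 0x800 + (unsigned)iBit / 8), (unsigned)iBit % 8);
    }
    return 0;
}
#endif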
1171
1172
1173#ifdef VBOX_STRICT
1174/**
1175 * Gets the permission bits for the specified MSR in the MSR bitmap.
1176 *
1177 * @returns VBox status code.
1178 * @retval VINF_SUCCESS if the specified MSR is found.
1179 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1180 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1181 *
1182 * @param pVCpu Pointer to the VMCPU.
1183 * @param uMsr The MSR.
1184 * @param penmRead Where to store the read permissions.
1185 * @param penmWrite Where to store the write permissions.
1186 */
1187static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1188{
1189 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1190 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1191 int32_t iBit;
1192 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1193
1194 /* See hmR0VmxSetMsrPermission() for the layout. */
1195 if (uMsr <= 0x00001FFF)
1196 iBit = uMsr;
1197 else if ( uMsr >= 0xC0000000
1198 && uMsr <= 0xC0001FFF)
1199 {
1200 iBit = (uMsr - 0xC0000000);
1201 pbMsrBitmap += 0x400;
1202 }
1203 else
1204 AssertMsgFailedReturn(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr), VERR_NOT_SUPPORTED);
1205
1206 Assert(iBit <= 0x1fff);
1207 if (ASMBitTest(pbMsrBitmap, iBit))
1208 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1209 else
1210 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1211
1212 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1213 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1214 else
1215 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1216 return VINF_SUCCESS;
1217}
1218#endif /* VBOX_STRICT */
1219
1220
1221/**
1222 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1223 * area.
1224 *
1225 * @returns VBox status code.
1226 * @param pVCpu Pointer to the VMCPU.
1227 * @param cMsrs The number of MSRs.
1228 */
1229DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1230{
1231 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1232 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1233 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1234 {
1235 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1236 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1237 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1238 }
1239
1240 /* Update number of guest MSRs to load/store across the world-switch. */
1241 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1242 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRCReturn(rc, rc);
1243
1244 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1245 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1246
1247 /* Update the VCPU's copy of the MSR count. */
1248 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1249
1250 return VINF_SUCCESS;
1251}
1252
1253
1254/**
1255 * Adds a new (or updates the value of an existing) guest/host MSR
1256 * pair to be swapped during the world-switch as part of the
1257 * auto-load/store MSR area in the VMCS.
1258 *
1259 * @returns true if the MSR was added -and- its value was updated, false
1260 * otherwise.
1261 * @param pVCpu Pointer to the VMCPU.
1262 * @param uMsr The MSR.
 1263 * @param uGuestMsrValue The value of the guest MSR.
1264 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1265 * necessary.
1266 */
1267static bool hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr)
1268{
1269 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1270 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1271 uint32_t i;
1272 for (i = 0; i < cMsrs; i++)
1273 {
1274 if (pGuestMsr->u32Msr == uMsr)
1275 break;
1276 pGuestMsr++;
1277 }
1278
1279 bool fAdded = false;
1280 if (i == cMsrs)
1281 {
1282 ++cMsrs;
1283 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1284 AssertRC(rc);
1285
1286 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1287 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1288 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1289
1290 fAdded = true;
1291 }
1292
1293 /* Update the MSR values in the auto-load/store MSR area. */
1294 pGuestMsr->u32Msr = uMsr;
1295 pGuestMsr->u64Value = uGuestMsrValue;
1296
1297 /* Create/update the MSR slot in the host MSR area. */
1298 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1299 pHostMsr += i;
1300 pHostMsr->u32Msr = uMsr;
1301
1302 /*
1303 * Update the host MSR only when requested by the caller AND when we're
1304 * adding it to the auto-load/store area. Otherwise, it would have been
1305 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
1306 */
1307 bool fUpdatedMsrValue = false;
1308 if ( fAdded
1309 && fUpdateHostMsr)
1310 {
1311 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1312 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1313 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1314 fUpdatedMsrValue = true;
1315 }
1316
1317 return fUpdatedMsrValue;
1318}
1319
1320
1321/**
1322 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1323 * auto-load/store MSR area in the VMCS.
1324 *
1325 * @returns VBox status code.
1326 * @param pVCpu Pointer to the VMCPU.
1327 * @param uMsr The MSR.
1328 */
1329static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1330{
1331 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1332 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1333 for (uint32_t i = 0; i < cMsrs; i++)
1334 {
1335 /* Find the MSR. */
1336 if (pGuestMsr->u32Msr == uMsr)
1337 {
1338 /* If it's the last MSR, simply reduce the count. */
1339 if (i == cMsrs - 1)
1340 {
1341 --cMsrs;
1342 break;
1343 }
1344
1345 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1346 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1347 pLastGuestMsr += cMsrs - 1;
1348 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1349 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1350
1351 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1352 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1353 pLastHostMsr += cMsrs - 1;
1354 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1355 pHostMsr->u64Value = pLastHostMsr->u64Value;
1356 --cMsrs;
1357 break;
1358 }
1359 pGuestMsr++;
1360 }
1361
1362 /* Update the VMCS if the count changed (meaning the MSR was found). */
1363 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1364 {
1365 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1366 AssertRCReturn(rc, rc);
1367
1368 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1369 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1370 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1371
1372 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1373 return VINF_SUCCESS;
1374 }
1375
1376 return VERR_NOT_FOUND;
1377}
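/*
 * Illustrative sketch, not part of the original file: the "find or append, remove by
 * swapping with the last entry" scheme the two functions above use to keep the
 * guest/host auto-load/store arrays dense. The EXAMPLEMSR structure and helper
 * names are simplified, hypothetical stand-ins for VMXAUTOMSR and the real code;
 * the sketch assumes the caller stays within the array capacity.
 */
#if 0 /* stand-alone illustration */
#include <stdint.h>
#include <stdio.h>

typedef struct EXAMPLEMSR { uint32_t u32Msr; uint64_t u64Value; } EXAMPLEMSR;

static void exampleAddOrUpdate(EXAMPLEMSR *paMsrs, uint32_t *pcMsrs, uint32_t uMsr, uint64_t uValue)
{
    uint32_t i;
    for (i = 0; i < *pcMsrs; i++)
        if (paMsrs[i].u32Msr == uMsr)
            break;
    if (i == *pcMsrs)
        ++*pcMsrs;                   /* not found: append at the end */
    paMsrs[i].u32Msr   = uMsr;
    paMsrs[i].u64Value = uValue;
}

static int exampleRemove(EXAMPLEMSR *paMsrs, uint32_t *pcMsrs, uint32_t uMsr)
{
    for (uint32_t i = 0; i < *pcMsrs; i++)
        if (paMsrs[i].u32Msr == uMsr)
        {
            /* Overwrite the entry with the last one and shrink the count;
               order is not preserved, but the array stays gap-free. */
            paMsrs[i] = paMsrs[*pcMsrs - 1];
            --*pcMsrs;
            return 0;
        }
    return -1;                       /* not found */
}

int main(void)
{
    EXAMPLEMSR aMsrs[8];
    uint32_t   cMsrs = 0;
    exampleAddOrUpdate(aMsrs, &cMsrs, 0x174, 0);   /* IA32_SYSENTER_CS (example)  */
    exampleAddOrUpdate(aMsrs, &cMsrs, 0x175, 0);   /* IA32_SYSENTER_ESP (example) */
    exampleRemove(aMsrs, &cMsrs, 0x174);
    printf("cMsrs=%u first=%#x\n", (unsigned)cMsrs, (unsigned)aMsrs[0].u32Msr);
    return 0;
}
#endif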
1378
1379
1380/**
1381 * Checks if the specified guest MSR is part of the auto-load/store area in
1382 * the VMCS.
1383 *
1384 * @returns true if found, false otherwise.
1385 * @param pVCpu Pointer to the VMCPU.
1386 * @param uMsr The MSR to find.
1387 */
1388static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1389{
1390 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1391 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1392
1393 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1394 {
1395 if (pGuestMsr->u32Msr == uMsr)
1396 return true;
1397 }
1398 return false;
1399}
1400
1401
1402/**
1403 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1404 *
1405 * @param pVCpu Pointer to the VMCPU.
1406 *
1407 * @remarks No-long-jump zone!!!
1408 */
1409static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1410{
1411 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1412 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1413 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1414 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1415
1416 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1417 {
1418 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1419
1420 /*
1421 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1422 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1423 */
1424 if (pHostMsr->u32Msr == MSR_K6_EFER)
1425 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1426 else
1427 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1428 }
1429
1430 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1431}
1432
1433
1434#if HC_ARCH_BITS == 64
1435/**
1436 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1437 * perform lazy restoration of the host MSRs while leaving VT-x.
1438 *
1439 * @param pVCpu Pointer to the VMCPU.
1440 *
1441 * @remarks No-long-jump zone!!!
1442 */
1443static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1444{
1445 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1446
1447 /*
1448 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1449 */
1450 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1451 {
1452 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1453 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1454 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1455 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1456 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1457 }
1458}
1459
1460
1461/**
1462 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1463 * lazily while leaving VT-x.
1464 *
1465 * @returns true if it does, false otherwise.
1466 * @param pVCpu Pointer to the VMCPU.
1467 * @param uMsr The MSR to check.
1468 */
1469static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1470{
1471 NOREF(pVCpu);
1472 switch (uMsr)
1473 {
1474 case MSR_K8_LSTAR:
1475 case MSR_K6_STAR:
1476 case MSR_K8_SF_MASK:
1477 case MSR_K8_KERNEL_GS_BASE:
1478 return true;
1479 }
1480 return false;
1481}
1482
1483
1484/**
1485 * Saves a set of guest MSRs back into the guest-CPU context.
1486 *
1487 * @param pVCpu Pointer to the VMCPU.
1488 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1489 * out-of-sync. Make sure to update the required fields
1490 * before using them.
1491 *
1492 * @remarks No-long-jump zone!!!
1493 */
1494static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1495{
1496 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1497 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1498
1499 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1500 {
1501 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1502 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
1503 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
1504 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
1505 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1506 }
1507}
1508
1509
1510/**
 1511 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1512 *
1513 * The name of this function is slightly confusing. This function does NOT
1514 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1515 * common prefix for functions dealing with "lazy restoration" of the shared
1516 * MSRs.
1517 *
1518 * @param pVCpu Pointer to the VMCPU.
1519 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1520 * out-of-sync. Make sure to update the required fields
1521 * before using them.
1522 *
1523 * @remarks No-long-jump zone!!!
1524 */
1525static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1526{
1527 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1528 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1529
1530#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
1531 do { \
1532 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1533 ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
1534 else \
1535 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1536 } while (0)
1537
1538 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1539 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1540 {
1541 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
1542 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
1543 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
1544 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
1545 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1546 }
1547 else
1548 {
1549 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1550 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1551 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1552 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1553 }
1554
1555#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
1556}
1557
1558
1559/**
1560 * Performs lazy restoration of the set of host MSRs if they were previously
1561 * loaded with guest MSR values.
1562 *
1563 * @param pVCpu Pointer to the VMCPU.
1564 *
1565 * @remarks No-long-jump zone!!!
1566 * @remarks The guest MSRs should have been saved back into the guest-CPU
1567 * context by hmR0VmxSaveGuestLazyMsrs()!!!
1568 */
1569static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1570{
1571 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1572 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1573
1574 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1575 {
1576 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1577 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1578 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1579 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1580 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1581 }
1582 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1583}
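/*
 * Rough lifecycle of the lazy-MSR state bits as implied by the four functions above (the actual call sites
 * live elsewhere in this file):
 *
 *     hmR0VmxLazySaveHostMsrs(pVCpu);              - snapshot the host values, set VMX_LAZY_MSRS_SAVED_HOST
 *     hmR0VmxLazyLoadGuestMsrs(pVCpu, pMixedCtx);  - write the guest values (only if they differ), set VMX_LAZY_MSRS_LOADED_GUEST
 *     ... guest execution ...
 *     hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);  - read the current values back into the guest-CPU context
 *     hmR0VmxLazyRestoreHostMsrs(pVCpu);           - write the host values back, clear both flags
 */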
1584#endif /* HC_ARCH_BITS == 64 */
1585
1586
1587/**
1588 * Verifies that our cached values of the VMCS controls are all
1589 * consistent with what's actually present in the VMCS.
1590 *
1591 * @returns VBox status code.
1592 * @param pVCpu Pointer to the VMCPU.
1593 */
1594static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1595{
1596 uint32_t u32Val;
1597 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1598 AssertRCReturn(rc, rc);
1599 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1600 VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
1601
1602 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1603 AssertRCReturn(rc, rc);
1604 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1605 VERR_VMX_EXIT_CTLS_CACHE_INVALID);
1606
1607 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1608 AssertRCReturn(rc, rc);
1609 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1610 VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
1611
1612 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1613 AssertRCReturn(rc, rc);
1614 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1615 VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
1616
1617 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1618 {
1619 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1620 AssertRCReturn(rc, rc);
1621 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1622 VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
1623 }
1624
1625 return VINF_SUCCESS;
1626}
1627
1628
1629#ifdef VBOX_STRICT
1630/**
1631 * Verifies that our cached host EFER value has not changed
1632 * since we cached it.
1633 *
1634 * @param pVCpu Pointer to the VMCPU.
1635 */
1636static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1637{
1638 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1639
1640 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1641 {
1642 uint64_t u64Val;
1643 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
1644 AssertRC(rc);
1645
1646 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1647 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1648 }
1649}
1650
1651
1652/**
1653 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1654 * VMCS are correct.
1655 *
1656 * @param pVCpu Pointer to the VMCPU.
1657 */
1658static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1659{
1660 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1661
1662 /* Verify MSR counts in the VMCS are what we think it should be. */
1663 uint32_t cMsrs;
1664 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1665 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1666
1667 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1668 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1669
1670 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1671 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1672
1673 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1674 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1675 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1676 {
1677 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1678 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1679 pGuestMsr->u32Msr, cMsrs));
1680
1681 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1682 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1683 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1684
1685 /* Verify that the permissions are as expected in the MSR bitmap. */
1686 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1687 {
1688 VMXMSREXITREAD enmRead;
1689 VMXMSREXITWRITE enmWrite;
1690 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1691 AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1692 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1693 {
1694 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1695 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1696 }
1697 else
1698 {
1699 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1700 pGuestMsr->u32Msr, cMsrs));
1701 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1702 pGuestMsr->u32Msr, cMsrs));
1703 }
1704 }
1705 }
1706}
1707#endif /* VBOX_STRICT */
1708
1709
1710/**
1711 * Flushes the TLB using EPT.
1712 *
1714 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1715 * enmFlush).
1716 * @param enmFlush Type of flush.
1717 *
1718 * @remarks Caller is responsible for making sure this function is called only
1719 * when NestedPaging is supported and providing @a enmFlush that is
1720 * supported by the CPU.
1721 * @remarks Can be called with interrupts disabled.
1722 */
1723static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1724{
1725 uint64_t au64Descriptor[2];
1726 if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
1727 au64Descriptor[0] = 0;
1728 else
1729 {
1730 Assert(pVCpu);
1731 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1732 }
1733 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1734
1735 int rc = VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
1736 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1737 rc));
1738 if ( RT_SUCCESS(rc)
1739 && pVCpu)
1740 {
1741 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1742 }
1743}
1744
1745
1746/**
1747 * Flushes the TLB using VPID.
1748 *
1750 * @param pVM Pointer to the VM.
1751 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1752 * enmFlush).
1753 * @param enmFlush Type of flush.
1754 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1755 * on @a enmFlush).
1756 *
1757 * @remarks Can be called with interrupts disabled.
1758 */
1759static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1760{
1761 NOREF(pVM);
1762 AssertPtr(pVM);
1763 Assert(pVM->hm.s.vmx.fVpid);
1764
1765 uint64_t au64Descriptor[2];
1766 if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
1767 {
1768 au64Descriptor[0] = 0;
1769 au64Descriptor[1] = 0;
1770 }
1771 else
1772 {
1773 AssertPtr(pVCpu);
1774 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1775 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1776 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1777 au64Descriptor[1] = GCPtr;
1778 }
1779
1780 int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
1781 AssertMsg(rc == VINF_SUCCESS,
1782 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1783 if ( RT_SUCCESS(rc)
1784 && pVCpu)
1785 {
1786 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1787 }
1788}
1789
1790
1791/**
1792 * Invalidates a guest page by guest virtual address. Only relevant for
1793 * EPT/VPID, otherwise there is nothing really to invalidate.
1794 *
1795 * @returns VBox status code.
1796 * @param pVM Pointer to the VM.
1797 * @param pVCpu Pointer to the VMCPU.
1798 * @param GCVirt Guest virtual address of the page to invalidate.
1799 */
1800VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1801{
1802 AssertPtr(pVM);
1803 AssertPtr(pVCpu);
1804 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1805
1806 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1807 if (!fFlushPending)
1808 {
1809 /*
1810 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1811 * See @bugref{6043} and @bugref{6177}.
1812 *
1813 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1814 * function may be called in a loop with individual addresses.
1815 */
1816 if (pVM->hm.s.vmx.fVpid)
1817 {
1818 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1819 {
1820 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1821 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1822 }
1823 else
1824 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1825 }
1826 else if (pVM->hm.s.fNestedPaging)
1827 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1828 }
1829
1830 return VINF_SUCCESS;
1831}
1832
1833
1834/**
1835 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1836 * otherwise there is nothing really to invalidate.
1837 *
1838 * @returns VBox status code.
1839 * @param pVM Pointer to the VM.
1840 * @param pVCpu Pointer to the VMCPU.
1841 * @param GCPhys Guest physical address of the page to invalidate.
1842 */
1843VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1844{
1845 NOREF(pVM); NOREF(GCPhys);
1846 LogFlowFunc(("%RGp\n", GCPhys));
1847
1848 /*
1849 * We cannot flush a page by guest-physical address: INVVPID takes only a linear address, while INVEPT flushes
1850 * by EPT context rather than by individual addresses. We update the force flag here and flush before the next
1851 * VM-entry in hmR0VmxFlushTLB*() as this function might be called in a loop; that flush is by EPT when EPT is in use. See @bugref{6568}.
1852 */
1853 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1854 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1855 return VINF_SUCCESS;
1856}
1857
1858
1859/**
1860 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1861 * case where neither EPT nor VPID is supported by the CPU.
1862 *
1863 * @param pVM Pointer to the VM.
1864 * @param pVCpu Pointer to the VMCPU.
1865 * @param pCpu Pointer to the global HM CPU struct.
1866 *
1867 * @remarks Called with interrupts disabled.
1868 */
1869static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1870{
1871 AssertPtr(pVCpu);
1872 AssertPtr(pCpu);
1873 NOREF(pVM);
1874
1875 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1876
1877 /** @todo TLB shootdown is currently not used. See hmQueueInvlPage(). */
1878#if 0
1879 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1880 pVCpu->hm.s.TlbShootdown.cPages = 0;
1881#endif
1882
1883 Assert(pCpu->idCpu != NIL_RTCPUID);
1884 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1885 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1886 pVCpu->hm.s.fForceTLBFlush = false;
1887 return;
1888}
1889
1890
1891/**
1892 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1893 *
1894 * @param pVM Pointer to the VM.
1895 * @param pVCpu Pointer to the VMCPU.
1896 * @param pCpu Pointer to the global HM CPU struct.
1897 * @remarks All references to "ASID" in this function pertain to "VPID" in
1898 * Intel's nomenclature. This is to avoid confusion in compare
1899 * statements, since the host-CPU copies are named "ASID".
1900 *
1901 * @remarks Called with interrupts disabled.
1902 */
1903static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1904{
1905#ifdef VBOX_WITH_STATISTICS
1906 bool fTlbFlushed = false;
1907# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1908# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1909 if (!fTlbFlushed) \
1910 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1911 } while (0)
1912#else
1913# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1914# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1915#endif
1916
1917 AssertPtr(pVM);
1918 AssertPtr(pCpu);
1919 AssertPtr(pVCpu);
1920 Assert(pCpu->idCpu != NIL_RTCPUID);
1921
1922 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1923 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1924 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1925
1926 /*
1927 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1928 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1929 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1930 */
1931 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1932 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1933 {
1934 ++pCpu->uCurrentAsid;
1935 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1936 {
1937 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1938 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1939 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1940 }
1941
1942 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1943 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1944 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1945
1946 /*
1947 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1948 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1949 */
1950 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1951 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1952 HMVMX_SET_TAGGED_TLB_FLUSHED();
1953 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1954 }
1955
1956 /* Check for explicit TLB shootdowns. */
1957 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1958 {
1959 /*
1960 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates
1961 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1962 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1963 * but not guest-physical mappings.
1964 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1965 */
1966 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1967 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1968 HMVMX_SET_TAGGED_TLB_FLUSHED();
1969 }
1970
1971 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
1972 * where it is commented out. Support individual entry flushing
1973 * someday. */
1974#if 0
1975 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1976 {
1977 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1978
1979 /*
1980 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1981 * as supported by the CPU.
1982 */
1983 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1984 {
1985 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1986 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1987 }
1988 else
1989 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1990
1991 HMVMX_SET_TAGGED_TLB_FLUSHED();
1992 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1993 pVCpu->hm.s.TlbShootdown.cPages = 0;
1994 }
1995#endif
1996
1997 pVCpu->hm.s.fForceTLBFlush = false;
1998
1999 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
2000
2001 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
2002 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
2003 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2004 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2005 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2006 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2007 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2008 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2009 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2010
2011 /* Update VMCS with the VPID. */
2012 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2013 AssertRC(rc);
2014
2015#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2016}
2017
2018
2019/**
2020 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2021 *
2023 * @param pVM Pointer to the VM.
2024 * @param pVCpu Pointer to the VMCPU.
2025 * @param pCpu Pointer to the global HM CPU struct.
2026 *
2027 * @remarks Called with interrupts disabled.
2028 */
2029static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2030{
2031 AssertPtr(pVM);
2032 AssertPtr(pVCpu);
2033 AssertPtr(pCpu);
2034 Assert(pCpu->idCpu != NIL_RTCPUID);
2035 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
2036 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
2037
2038 /*
2039 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2040 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2041 */
2042 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2043 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2044 {
2045 pVCpu->hm.s.fForceTLBFlush = true;
2046 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2047 }
2048
2049 /* Check for explicit TLB shootdown flushes. */
2050 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2051 {
2052 pVCpu->hm.s.fForceTLBFlush = true;
2053 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2054 }
2055
2056 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2057 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2058
2059 if (pVCpu->hm.s.fForceTLBFlush)
2060 {
2061 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2062 pVCpu->hm.s.fForceTLBFlush = false;
2063 }
2064 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2065 * where it is commented out. Support individual entry flushing
2066 * someday. */
2067#if 0
2068 else
2069 {
2070 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2071 {
2072 /* We cannot flush individual entries without VPID support. Flush using EPT. */
2073 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2074 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2075 }
2076 else
2077 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2078
2079 pVCpu->hm.s.TlbShootdown.cPages = 0;
2080 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2081 }
2082#endif
2083}
2084
2085
2086/**
2087 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2088 *
2090 * @param pVM Pointer to the VM.
2091 * @param pVCpu Pointer to the VMCPU.
2092 * @param pCpu Pointer to the global HM CPU struct.
2093 *
2094 * @remarks Called with interrupts disabled.
2095 */
2096static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2097{
2098 AssertPtr(pVM);
2099 AssertPtr(pVCpu);
2100 AssertPtr(pCpu);
2101 Assert(pCpu->idCpu != NIL_RTCPUID);
2102 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
2103 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
2104
2105 /*
2106 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2107 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2108 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2109 */
2110 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2111 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2112 {
2113 pVCpu->hm.s.fForceTLBFlush = true;
2114 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2115 }
2116
2117 /* Check for explicit TLB shootdown flushes. */
2118 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2119 {
2120 /*
2121 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb()),
2122 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
2123 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush as well) - an obscure corner case.
2124 */
2125 pVCpu->hm.s.fForceTLBFlush = true;
2126 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2127 }
2128
2129 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2130 if (pVCpu->hm.s.fForceTLBFlush)
2131 {
2132 ++pCpu->uCurrentAsid;
2133 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2134 {
2135 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2136 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2137 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2138 }
2139
2140 pVCpu->hm.s.fForceTLBFlush = false;
2141 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2142 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2143 if (pCpu->fFlushAsidBeforeUse)
2144 {
2145 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
2146 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2147 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
2148 {
2149 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2150 pCpu->fFlushAsidBeforeUse = false;
2151 }
2152 else
2153 {
2154 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2155 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2156 }
2157 }
2158 }
2159 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2160 * where it is commented out. Support individual entry flushing
2161 * someday. */
2162#if 0
2163 else
2164 {
2165 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2166 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
2167 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2168 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
2169
2170 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2171 {
2172 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
2173 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2174 {
2175 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2176 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2177 }
2178 else
2179 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2180
2181 pVCpu->hm.s.TlbShootdown.cPages = 0;
2182 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2183 }
2184 else
2185 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2186 }
2187#endif
2188
2189 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2190 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2191 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2192 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2193 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2194 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2195 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2196
2197 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2198 AssertRC(rc);
2199}
2200
2201
2202/**
2203 * Flushes the guest TLB entry based on CPU capabilities.
2204 *
2205 * @param pVCpu Pointer to the VMCPU.
2206 * @param pCpu Pointer to the global HM CPU struct.
2207 */
2208DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2209{
2210#ifdef HMVMX_ALWAYS_FLUSH_TLB
2211 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2212#endif
2213 PVM pVM = pVCpu->CTX_SUFF(pVM);
2214 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
2215 {
2216 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2217 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2218 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2219 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2220 default:
2221 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2222 break;
2223 }
2224
2225 /* VMCPU_FF_TLB_SHOOTDOWN is unused. */
2226 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN));
2227
2228 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2229}
2230
2231
2232/**
2233 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2234 * TLB entries from the host TLB before VM-entry.
2235 *
2236 * @returns VBox status code.
2237 * @param pVM Pointer to the VM.
2238 */
2239static int hmR0VmxSetupTaggedTlb(PVM pVM)
2240{
2241 /*
2242 * Determine optimal flush type for Nested Paging.
2243 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
2244 * guest execution (see hmR3InitFinalizeR0()).
2245 */
2246 if (pVM->hm.s.fNestedPaging)
2247 {
2248 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2249 {
2250 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2251 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
2252 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2253 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
2254 else
2255 {
2256 /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
2257 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2258 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2259 }
2260
2261 /* Make sure the write-back cacheable memory type for EPT is supported. */
2262 if (!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
2263 {
2264 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.Msrs.u64EptVpidCaps));
2265 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2266 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2267 }
2268 }
2269 else
2270 {
2271 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2272 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2273 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2274 }
2275 }
2276
2277 /*
2278 * Determine optimal flush type for VPID.
2279 */
2280 if (pVM->hm.s.vmx.fVpid)
2281 {
2282 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2283 {
2284 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2285 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
2286 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2287 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
2288 else
2289 {
2290 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2291 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2292 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
2293 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2294 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2295 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2296 pVM->hm.s.vmx.fVpid = false;
2297 }
2298 }
2299 else
2300 {
2301 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2302 Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
2303 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2304 pVM->hm.s.vmx.fVpid = false;
2305 }
2306 }
2307
2308 /*
2309 * Setup the handler for flushing tagged-TLBs.
2310 */
2311 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2312 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
2313 else if (pVM->hm.s.fNestedPaging)
2314 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
2315 else if (pVM->hm.s.vmx.fVpid)
2316 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
2317 else
2318 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
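    /*
     * Summary of the selection above:
     *
     *     fNestedPaging   fVpid     uFlushTaggedTlb
     *     true            true      HMVMX_FLUSH_TAGGED_TLB_EPT_VPID
     *     true            false     HMVMX_FLUSH_TAGGED_TLB_EPT
     *     false           true      HMVMX_FLUSH_TAGGED_TLB_VPID
     *     false           false     HMVMX_FLUSH_TAGGED_TLB_NONE
     */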
2319 return VINF_SUCCESS;
2320}
2321
2322
2323/**
2324 * Sets up pin-based VM-execution controls in the VMCS.
2325 *
2326 * @returns VBox status code.
2327 * @param pVM Pointer to the VM.
2328 * @param pVCpu Pointer to the VMCPU.
2329 */
2330static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2331{
2332 AssertPtr(pVM);
2333 AssertPtr(pVCpu);
2334
2335 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2336 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2337
2338 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2339 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2340
2341 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2342 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2343
2344 /* Enable the VMX preemption timer. */
2345 if (pVM->hm.s.vmx.fUsePreemptTimer)
2346 {
2347 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2348 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2349 }
2350
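 /*
 * Sanity of the above: 'val' now holds every control we need (the must-be-one bits plus the features we enabled),
 * while 'zap' is the allowed-one mask. Any bit set in 'val' but clear in 'zap' is a control the CPU cannot
 * actually set, so (val & zap) != val flags an unusable feature combination.
 */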
2351 if ((val & zap) != val)
2352 {
2353 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2354 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
2355 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2356 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2357 }
2358
2359 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
2360 AssertRCReturn(rc, rc);
2361
2362 pVCpu->hm.s.vmx.u32PinCtls = val;
2363 return rc;
2364}
2365
2366
2367/**
2368 * Sets up processor-based VM-execution controls in the VMCS.
2369 *
2370 * @returns VBox status code.
2371 * @param pVM Pointer to the VM.
2372 * @param pVCpu Pointer to the VMCPU.
2373 */
2374static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2375{
2376 AssertPtr(pVM);
2377 AssertPtr(pVCpu);
2378
2379 int rc = VERR_INTERNAL_ERROR_5;
2380 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2381 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2382
2383 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2384 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2385 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2386 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2387 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2388 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2389 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2390
2391 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; verify the CPU allows it to be set and doesn't force it to always be set. */
2392 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2393 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2394 {
2395 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2396 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2397 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2398 }
2399
2400 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2401 if (!pVM->hm.s.fNestedPaging)
2402 {
2403 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2404 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2405 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2406 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2407 }
2408
2409 /* Use TPR shadowing if supported by the CPU. */
2410 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2411 {
2412 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2413 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2414 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2415 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2416 AssertRCReturn(rc, rc);
2417
2418 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2419 /* CR8 writes cause a VM-exit based on TPR threshold. */
2420 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2421 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2422 }
2423 else
2424 {
2425 /*
2426 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2427 * Set this control only for 64-bit guests.
2428 */
2429 if (pVM->hm.s.fAllow64BitGuests)
2430 {
2431 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2432 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2433 }
2434 }
2435
2436 /* Use MSR-bitmaps if supported by the CPU. */
2437 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2438 {
2439 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2440
2441 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2442 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2443 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2444 AssertRCReturn(rc, rc);
2445
2446 /*
2447 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2448 * automatically using dedicated fields in the VMCS.
2449 */
2450 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2451 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2452 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2453 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2454 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2455
2456#if HC_ARCH_BITS == 64
2457 /*
2458 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2459 */
2460 if (pVM->hm.s.fAllow64BitGuests)
2461 {
2462 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2463 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2464 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2465 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2466 }
2467#endif
2468 }
2469
2470 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2471 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2472 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2473
2474 if ((val & zap) != val)
2475 {
2476 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2477 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
2478 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2479 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2480 }
2481
2482 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
2483 AssertRCReturn(rc, rc);
2484
2485 pVCpu->hm.s.vmx.u32ProcCtls = val;
2486
2487 /*
2488 * Secondary processor-based VM-execution controls.
2489 */
2490 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2491 {
2492 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2493 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2494
2495 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2496 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
2497
2498 if (pVM->hm.s.fNestedPaging)
2499 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
2500 else
2501 {
2502 /*
2503 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
2504 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
2505 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
2506 */
2507 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2508 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2509 }
2510
2511 if (pVM->hm.s.vmx.fVpid)
2512 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
2513
2514 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2515 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
2516
2517 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
2518 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2519 * done dynamically. */
2520 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2521 {
2522 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2523 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2524 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2525 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2526 AssertRCReturn(rc, rc);
2527 }
2528
2529 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2530 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
2531
2532 if ((val & zap) != val)
2533 {
2534 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
2535 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
2536 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2537 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2538 }
2539
2540 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
2541 AssertRCReturn(rc, rc);
2542
2543 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2544 }
2545 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2546 {
2547 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest set as true when secondary processor-based VM-execution controls are not "
2548 "available\n"));
2549 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2550 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2551 }
2552
2553 return VINF_SUCCESS;
2554}
2555
2556
2557/**
2558 * Sets up miscellaneous (everything other than Pin & Processor-based
2559 * VM-execution) control fields in the VMCS.
2560 *
2561 * @returns VBox status code.
2562 * @param pVM Pointer to the VM.
2563 * @param pVCpu Pointer to the VMCPU.
2564 */
2565static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2566{
2567 NOREF(pVM);
2568 AssertPtr(pVM);
2569 AssertPtr(pVCpu);
2570
2571 int rc = VERR_GENERAL_FAILURE;
2572
2573 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2574#if 0
2575 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
2576 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
2577 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
2578
2579 /*
2580 * Set MASK & MATCH to 0. VMX checks whether (GuestPFErrCode & MASK) == MATCH. If they are equal (in our case they always are)
2581 * and the X86_XCPT_PF bit in the exception bitmap is set, a VM-exit occurs; if the bit is clear, no exit occurs.
2582 * We thus use the exception bitmap to control it rather than use both.
2583 */
2584 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
2585 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
2586
2587 /** @todo Explore possibility of using IO-bitmaps. */
2588 /* All IO & IOIO instructions cause VM-exits. */
2589 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
2590 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
2591
2592 /* Initialize the MSR-bitmap area. */
2593 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2594 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
2595 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2596#endif
2597
2598 /* Setup MSR auto-load/store area. */
2599 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2600 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2601 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2602 AssertRCReturn(rc, rc);
2603 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2604 AssertRCReturn(rc, rc);
2605
2606 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2607 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2608 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2609 AssertRCReturn(rc, rc);
2610
2611 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2612 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2613 AssertRCReturn(rc, rc);
2614
2615 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2616#if 0
2617 /* Setup debug controls */
2618 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
2619 AssertRCReturn(rc, rc);
2620 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2621 AssertRCReturn(rc, rc);
2622#endif
2623
2624 return rc;
2625}
2626
2627
2628/**
2629 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2630 *
2631 * @returns VBox status code.
2632 * @param pVM Pointer to the VM.
2633 * @param pVCpu Pointer to the VMCPU.
2634 */
2635static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2636{
2637 AssertPtr(pVM);
2638 AssertPtr(pVCpu);
2639
2640 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2641
2642 uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
2643
2644 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2645 if (!pVM->hm.s.fNestedPaging)
2646 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2647
2648 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2649 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2650 AssertRCReturn(rc, rc);
2651 return rc;
2652}
2653
2654
2655/**
2656 * Sets up the initial guest-state mask. The guest-state mask is consulted
2657 * before reading guest-state fields from the VMCS as VMREADs can be expensive
2658 * for the nested virtualization case (as it would cause a VM-exit).
2659 *
2660 * @param pVCpu Pointer to the VMCPU.
2661 */
2662static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2663{
2664 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2665 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2666 return VINF_SUCCESS;
2667}
2668
2669
2670/**
2671 * Does per-VM VT-x initialization.
2672 *
2673 * @returns VBox status code.
2674 * @param pVM Pointer to the VM.
2675 */
2676VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2677{
2678 LogFlowFunc(("pVM=%p\n", pVM));
2679
2680 int rc = hmR0VmxStructsAlloc(pVM);
2681 if (RT_FAILURE(rc))
2682 {
2683 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2684 return rc;
2685 }
2686
2687 return VINF_SUCCESS;
2688}
2689
2690
2691/**
2692 * Does per-VM VT-x termination.
2693 *
2694 * @returns VBox status code.
2695 * @param pVM Pointer to the VM.
2696 */
2697VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2698{
2699 LogFlowFunc(("pVM=%p\n", pVM));
2700
2701#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2702 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2703 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2704#endif
2705 hmR0VmxStructsFree(pVM);
2706 return VINF_SUCCESS;
2707}
2708
2709
2710/**
2711 * Sets up the VM for execution under VT-x.
2712 * This function is only called once per-VM during initialization.
2713 *
2714 * @returns VBox status code.
2715 * @param pVM Pointer to the VM.
2716 */
2717VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2718{
2719 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2720 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2721
2722 LogFlowFunc(("pVM=%p\n", pVM));
2723
2724 /*
2725 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2726 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel().
2727 */
2728 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2729 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2730 || !pVM->hm.s.vmx.pRealModeTSS))
2731 {
2732 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2733 return VERR_INTERNAL_ERROR;
2734 }
2735
2736#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2737 /*
2738 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2739 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2740 */
2741 if ( pVM->hm.s.fAllow64BitGuests
2742 && !HMVMX_IS_64BIT_HOST_MODE())
2743 {
2744 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2745 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2746 }
2747#endif
2748
2749 /* Initialize these always, see hmR3InitFinalizeR0().*/
2750 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NONE;
2751 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
2752
2753 /* Setup the tagged-TLB flush handlers. */
2754 int rc = hmR0VmxSetupTaggedTlb(pVM);
2755 if (RT_FAILURE(rc))
2756 {
2757 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2758 return rc;
2759 }
2760
2761 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2762 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2763#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2764 if ( HMVMX_IS_64BIT_HOST_MODE()
2765 && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2766 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2767 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2768 {
2769 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2770 }
2771#endif
2772
2773 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2774 {
2775 PVMCPU pVCpu = &pVM->aCpus[i];
2776 AssertPtr(pVCpu);
2777 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2778
2779 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2780 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2781
2782 /* Initialize the VM-exit history array with end-of-array markers (UINT16_MAX). */
2783 Assert(!pVCpu->hm.s.idxExitHistoryFree);
2784 HMCPU_EXIT_HISTORY_RESET(pVCpu);
2785
2786 /* Set revision dword at the beginning of the VMCS structure. */
2787 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2788
2789 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2790 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2791 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2792 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2793
2794 /* Load this VMCS as the current VMCS. */
2795 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2796 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2797 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2798
2799 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2800 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2801 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2802
2803 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2804 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2805 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2806
2807 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2808 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2809 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2810
2811 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2812 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2813 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2814
2815 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2816 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2817 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2818
2819#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2820 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2821 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2822 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2823#endif
2824
2825 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2826 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2827 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2828 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2829
2830 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2831
2832 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2833 }
2834
2835 return VINF_SUCCESS;
2836}
2837
2838
2839/**
2840 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2841 * the VMCS.
2842 *
2843 * @returns VBox status code.
2844 * @param pVM Pointer to the VM.
2845 * @param pVCpu Pointer to the VMCPU.
2846 */
2847DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2848{
2849 NOREF(pVM); NOREF(pVCpu);
2850
2851 RTCCUINTREG uReg = ASMGetCR0();
2852 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2853 AssertRCReturn(rc, rc);
2854
2855#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2856 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2857 if (HMVMX_IS_64BIT_HOST_MODE())
2858 {
2859 uint64_t uRegCR3 = HMR0Get64bitCR3();
2860 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2861 }
2862 else
2863#endif
2864 {
2865 uReg = ASMGetCR3();
2866 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2867 }
2868 AssertRCReturn(rc, rc);
2869
2870 uReg = ASMGetCR4();
2871 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2872 AssertRCReturn(rc, rc);
2873 return rc;
2874}
2875
2876
2877#if HC_ARCH_BITS == 64
2878/**
2879 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2880 * requirements. See hmR0VmxSaveHostSegmentRegs().
2881 */
2882# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2883 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2884 { \
2885 bool fValidSelector = true; \
2886 if ((selValue) & X86_SEL_LDT) \
2887 { \
2888 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2889 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2890 } \
2891 if (fValidSelector) \
2892 { \
2893 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2894 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2895 } \
2896 (selValue) = 0; \
2897 }
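/*
 * For example, VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS) checks whether uSelDS has the RPL or TI (LDT) bits set;
 * if so, it records the selector in RestoreHost.uHostSelDS and sets VMX_RESTORE_HOST_SEL_DS in fRestoreHostFlags
 * (provided the selector refers to a present descriptor) and then zeroes uSelDS, so that a NULL selector is
 * written to the VMCS host-state area.
 */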
2898#endif
2899
2900
2901/**
2902 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2903 * the host-state area in the VMCS.
2904 *
2905 * @returns VBox status code.
2906 * @param pVM Pointer to the VM.
2907 * @param pVCpu Pointer to the VMCPU.
2908 */
2909DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2910{
2911 int rc = VERR_INTERNAL_ERROR_5;
2912
2913#if HC_ARCH_BITS == 64
2914 /*
2915 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2916 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
2917 */
2918 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2919 ("Re-saving host-state after executing guest code without leaving VT-x!\n"), VERR_WRONG_ORDER);
2920#endif
2921
2922 /*
2923 * Host DS, ES, FS and GS segment registers.
2924 */
2925#if HC_ARCH_BITS == 64
2926 RTSEL uSelDS = ASMGetDS();
2927 RTSEL uSelES = ASMGetES();
2928 RTSEL uSelFS = ASMGetFS();
2929 RTSEL uSelGS = ASMGetGS();
2930#else
2931 RTSEL uSelDS = 0;
2932 RTSEL uSelES = 0;
2933 RTSEL uSelFS = 0;
2934 RTSEL uSelGS = 0;
2935#endif
2936
2937 /* Recalculate which host-state bits need to be manually restored. */
2938 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2939
2940 /*
2941 * Host CS and SS segment registers.
2942 */
2943#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2944 RTSEL uSelCS;
2945 RTSEL uSelSS;
2946 if (HMVMX_IS_64BIT_HOST_MODE())
2947 {
2948 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2949 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2950 }
2951 else
2952 {
2953 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2954 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2955 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2956 }
2957#else
2958 RTSEL uSelCS = ASMGetCS();
2959 RTSEL uSelSS = ASMGetSS();
2960#endif
2961
2962 /*
2963 * Host TR segment register.
2964 */
2965 RTSEL uSelTR = ASMGetTR();
2966
2967#if HC_ARCH_BITS == 64
2968 /*
2969 * Determine if the host segment registers are suitable for VT-x; otherwise load null (zero) selectors so VM-entry succeeds, and restore them
2970 * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2971 */
2972 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2973 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2974 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2975 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2976# undef VMXLOCAL_ADJUST_HOST_SEG
2977#endif
2978
2979 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2980 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2981 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2982 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2983 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2984 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2985 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2986 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2987 Assert(uSelCS);
2988 Assert(uSelTR);
2989
2990 /* The assertion is correct, but we would not have updated u32ExitCtls yet at this point. */
2991#if 0
2992 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2993 Assert(uSelSS != 0);
2994#endif
2995
2996 /* Write these host selector fields into the host-state area in the VMCS. */
2997 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2998 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2999#if HC_ARCH_BITS == 64
3000 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
3001 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
3002 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
3003 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
3004#endif
3005 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
3006
3007 /*
3008 * Host GDTR and IDTR.
3009 */
3010 RTGDTR Gdtr;
3011 RT_ZERO(Gdtr);
3012#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3013 if (HMVMX_IS_64BIT_HOST_MODE())
3014 {
3015 X86XDTR64 Gdtr64;
3016 X86XDTR64 Idtr64;
3017 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
3018 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
3019 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
3020
3021 Gdtr.cbGdt = Gdtr64.cb;
3022 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
3023 }
3024 else
3025#endif
3026 {
3027 RTIDTR Idtr;
3028 ASMGetGDTR(&Gdtr);
3029 ASMGetIDTR(&Idtr);
3030 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
3031 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
3032
3033#if HC_ARCH_BITS == 64
3034 /*
3035 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
3036 * maximum limit (0xffff) on every VM-exit.
3037 */
3038 if (Gdtr.cbGdt != 0xffff)
3039 {
3040 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3041 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3042 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3043 }
3044
3045 /*
3046 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
3047 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit as 0xfff, VT-x
3048 * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insist
3049 * on 0xfff being the limit (Windows Patch Guard) or use the limit for other purposes (darwin puts the CPU ID in there
3050 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on
3051 * hosts where we are pretty sure it won't cause trouble.
3052 */
3053# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3054 if (Idtr.cbIdt < 0x0fff)
3055# else
3056 if (Idtr.cbIdt != 0xffff)
3057# endif
3058 {
3059 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3060 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3061 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3062 }
3063#endif
3064 }
3065
3066 /*
3067 * Host TR base. Verify that the TR selector doesn't point past the GDT. Masking off the TI and RPL bits yields
3068 * the descriptor's byte offset (index scaled by 8), which is effectively what the CPU does. TI is always 0 for TR; RPL should be 0 too in most cases.
3069 */
3070 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
3071 ("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt),
3072 VERR_VMX_INVALID_HOST_STATE);
3073
3074 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
3075#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3076 if (HMVMX_IS_64BIT_HOST_MODE())
3077 {
3078 /* We need the 64-bit TR base for hybrid darwin. */
3079 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
3080 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
3081 }
3082 else
3083#endif
3084 {
3085 uintptr_t uTRBase;
3086#if HC_ARCH_BITS == 64
3087 uTRBase = X86DESC64_BASE(pDesc);
3088
3089 /*
3090 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
3091 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
3092 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
3093 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3094 *
3095 * [1] See Intel spec. 3.5 "System Descriptor Types".
3096 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3097 */
3098 Assert(pDesc->System.u4Type == 11);
3099 if ( pDesc->System.u16LimitLow != 0x67
3100 || pDesc->System.u4LimitHigh)
3101 {
3102 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3103 /* If the host has made the GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
3104 if (pVM->hm.s.uHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
3105 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3106 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3107
3108 /* Store the GDTR here as we need it while restoring TR. */
3109 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3110 }
3111#else
3112 uTRBase = X86DESC_BASE(pDesc);
3113#endif
3114 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3115 }
3116 AssertRCReturn(rc, rc);
3117
3118 /*
3119 * Host FS base and GS base.
3120 */
3121#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3122 if (HMVMX_IS_64BIT_HOST_MODE())
3123 {
3124 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3125 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3126 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
3127 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
3128
3129# if HC_ARCH_BITS == 64
3130 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3131 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3132 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3133 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3134 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3135# endif
3136 }
3137#endif
3138 return rc;
3139}
3140
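/*
 * A minimal sketch (not built; hypothetical helper name) of the selector-to-descriptor
 * address computation used for TR above: masking off the TI and RPL bits leaves the
 * descriptor index already scaled by 8, which is simply added to the GDT base.
 */
#if 0
static uintptr_t hmR0VmxExampleSelToDescAddr(RTSEL uSel, uintptr_t uGdtBase)
{
    return uGdtBase + (uSel & X86_SEL_MASK); /* Clears the RPL and TI bits, leaving index * 8. */
}
#endif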
3141
3142/**
3143 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
3144 * host-state area of the VMCS. These MSRs will be automatically restored on
3145 * the host after every successful VM-exit.
3146 *
3147 * @returns VBox status code.
3148 * @param pVM Pointer to the VM.
3149 * @param pVCpu Pointer to the VMCPU.
3150 *
3151 * @remarks No-long-jump zone!!!
3152 */
3153DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3154{
3155 NOREF(pVM);
3156
3157 AssertPtr(pVCpu);
3158 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3159
3160 int rc = VINF_SUCCESS;
3161#if HC_ARCH_BITS == 64
3162 if (pVM->hm.s.fAllow64BitGuests)
3163 hmR0VmxLazySaveHostMsrs(pVCpu);
3164#endif
3165
3166 /*
3167 * Host Sysenter MSRs.
3168 */
3169 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3170 AssertRCReturn(rc, rc);
3171#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3172 if (HMVMX_IS_64BIT_HOST_MODE())
3173 {
3174 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3175 AssertRCReturn(rc, rc);
3176 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3177 }
3178 else
3179 {
3180 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3181 AssertRCReturn(rc, rc);
3182 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3183 }
3184#elif HC_ARCH_BITS == 32
3185 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3186 AssertRCReturn(rc, rc);
3187 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3188#else
3189 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3190 AssertRCReturn(rc, rc);
3191 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3192#endif
3193 AssertRCReturn(rc, rc);
3194
3195 /*
3196 * Host EFER MSR.
3197 * If the CPU supports the newer VMCS controls for managing EFER, use them.
3198 * Otherwise it's done as part of the auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
3199 */
3200 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3201 {
3202 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3203 AssertRCReturn(rc, rc);
3204 }
3205
3206 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
3207 * hmR0VmxLoadGuestExitCtls() !! */
3208
3209 return rc;
3210}
3211
3212
3213/**
3214 * Figures out if we need to swap the EFER MSR, which is
3215 * particularly expensive.
3216 *
3217 * We check all relevant bits. For now, that's everything
3218 * besides LMA/LME, as these two bits are handled by VM-entry,
3219 * see hmR0VmxLoadGuestExitCtls() and
3220 * hmR0VmxLoadGuestEntryCtls().
3221 *
3222 * @returns true if we need to load guest EFER, false otherwise.
3223 * @param pVCpu Pointer to the VMCPU.
3224 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3225 * out-of-sync. Make sure to update the required fields
3226 * before using them.
3227 *
3228 * @remarks Requires EFER, CR4.
3229 * @remarks No-long-jump zone!!!
3230 */
3231static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3232{
3233#ifdef HMVMX_ALWAYS_SWAP_EFER
3234 return true;
3235#endif
3236
3237#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3238 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3239 if (CPUMIsGuestInLongMode(pVCpu))
3240 return false;
3241#endif
3242
3243 PVM pVM = pVCpu->CTX_SUFF(pVM);
3244 uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3245 uint64_t u64GuestEfer = pMixedCtx->msrEFER;
3246
3247 /*
3248 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
3249 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
3250 */
3251 if ( CPUMIsGuestInLongMode(pVCpu)
3252 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3253 {
3254 return true;
3255 }
3256
3257 /*
3258 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
3259 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3260 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3261 */
3262 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3263 && (pMixedCtx->cr0 & X86_CR0_PG)
3264 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3265 {
3266 /* Assert that host is PAE capable. */
3267 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3268 return true;
3269 }
3270
3271 /** @todo Check the latest Intel spec. for any other bits,
3272 * like SMEP/SMAP? */
3273 return false;
3274}
3275
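/*
 * Worked example (hypothetical EFER values): with a host EFER of LME|LMA|SCE|NXE and a
 * 64-bit guest EFER of LME|LMA|NXE, only SCE differs, so hmR0VmxShouldSwapEferMsr()
 * returns true and the guest's SYSCALL behaviour is preserved by swapping EFER. If SCE
 * and NXE match, it returns false and the host value is kept, since LMA/LME are already
 * handled by the VM-entry/VM-exit controls.
 */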
3276
3277/**
3278 * Sets up VM-entry controls in the VMCS. These controls can affect things done
3279 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
3280 * controls".
3281 *
3282 * @returns VBox status code.
3283 * @param pVCpu Pointer to the VMCPU.
3284 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3285 * out-of-sync. Make sure to update the required fields
3286 * before using them.
3287 *
3288 * @remarks Requires EFER.
3289 * @remarks No-long-jump zone!!!
3290 */
3291DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3292{
3293 int rc = VINF_SUCCESS;
3294 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3295 {
3296 PVM pVM = pVCpu->CTX_SUFF(pVM);
3297 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3298 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3299
3300 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3301 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3302
3303 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3304 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3305 {
3306 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3307 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
3308 }
3309 else
3310 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3311
3312 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use them. */
3313 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3314 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3315 {
3316 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3317 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
3318 }
3319
3320 /*
3321 * The following should -not- be set (since we're not in SMM mode):
3322 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3323 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3324 */
3325
3326 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3327 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3328
3329 if ((val & zap) != val)
3330 {
3331 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3332 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
3333 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3334 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3335 }
3336
3337 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
3338 AssertRCReturn(rc, rc);
3339
3340 pVCpu->hm.s.vmx.u32EntryCtls = val;
3341 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3342 }
3343 return rc;
3344}
3345
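/*
 * A minimal sketch (not built; hypothetical helper name) of the disallowed0/allowed1
 * pattern used to assemble the VM-entry (and VM-exit) controls above: bits set in
 * disallowed0 must end up 1, bits clear in allowed1 must stay 0, which is exactly what
 * the ((val & zap) != val) check rejects.
 */
#if 0
static bool hmR0VmxExampleIsCtlComboValid(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t const val = fDisallowed0 | fDesired;   /* Must-be-one bits forced on. */
    uint32_t const zap = fAllowed1;                 /* May-be-one mask. */
    return (val & zap) == val;                      /* Any desired bit outside allowed1 fails the check. */
}
#endif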
3346
3347/**
3348 * Sets up the VM-exit controls in the VMCS.
3349 *
3350 * @returns VBox status code.
3352 * @param pVCpu Pointer to the VMCPU.
3353 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3354 * out-of-sync. Make sure to update the required fields
3355 * before using them.
3356 *
3357 * @remarks Requires EFER.
3358 */
3359DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3360{
3361 NOREF(pMixedCtx);
3362
3363 int rc = VINF_SUCCESS;
3364 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3365 {
3366 PVM pVM = pVCpu->CTX_SUFF(pVM);
3367 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3368 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3369
3370 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3371 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3372
3373 /*
3374 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3375 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bits to this value. See assertion in hmR0VmxSaveHostMsrs().
3376 */
3377#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3378 if (HMVMX_IS_64BIT_HOST_MODE())
3379 {
3380 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3381 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3382 }
3383 else
3384 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3385#else
3386 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3387 {
3388 /* The switcher returns to long mode, EFER is managed by the switcher. */
3389 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3390 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3391 }
3392 else
3393 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3394#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
3395
3396 /* If the newer VMCS fields for managing EFER exist, use them. */
3397 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3398 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3399 {
3400 val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3401 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3402 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
3403 }
3404
3405 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3406 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3407
3408 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3409 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3410 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3411
3412 if ( pVM->hm.s.vmx.fUsePreemptTimer
3413 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
3414 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3415
3416 if ((val & zap) != val)
3417 {
3418 LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! disallowed0=%#RX32 val=%#RX32 zap=%#RX32\n",
3419 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
3420 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3421 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3422 }
3423
3424 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
3425 AssertRCReturn(rc, rc);
3426
3427 pVCpu->hm.s.vmx.u32ExitCtls = val;
3428 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3429 }
3430 return rc;
3431}
3432
3433
3434/**
3435 * Loads the guest APIC and related state.
3436 *
3437 * @returns VBox status code.
3439 * @param pVCpu Pointer to the VMCPU.
3440 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3441 * out-of-sync. Make sure to update the required fields
3442 * before using them.
3443 */
3444DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3445{
3446 NOREF(pMixedCtx);
3447
3448 int rc = VINF_SUCCESS;
3449 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3450 {
3451 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
3452 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3453 {
3454 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3455
3456 bool fPendingIntr = false;
3457 uint8_t u8Tpr = 0;
3458 uint8_t u8PendingIntr = 0;
3459 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3460 AssertRCReturn(rc, rc);
3461
3462 /*
3463 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
3464 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
3465 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
3466 * the interrupt when we VM-exit for other reasons.
3467 */
3468 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3469 uint32_t u32TprThreshold = 0;
3470 if (fPendingIntr)
3471 {
3472 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3473 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
3474 const uint8_t u8TprPriority = (u8Tpr >> 4) & 0xf;
3475 if (u8PendingPriority <= u8TprPriority)
3476 u32TprThreshold = u8PendingPriority;
3477 else
3478 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
3479 }
3480 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3481
3482 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3483 AssertRCReturn(rc, rc);
3484 }
3485
3486 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3487 }
3488 return rc;
3489}
3490
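/*
 * Worked example for the TPR threshold above (hypothetical values): with guest TPR 0x50
 * (priority class 5) and the highest pending interrupt vector 0x61 (class 6), the interrupt
 * is deliverable and the threshold becomes the TPR class (5). With TPR 0x80 (class 8) the
 * same interrupt is masked, so the threshold becomes the pending class (6) and VT-x causes
 * a VM-exit as soon as the guest lowers its TPR class below 6.
 */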
3491
3492/**
3493 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3494 *
3495 * @returns Guest's interruptibility-state.
3496 * @param pVCpu Pointer to the VMCPU.
3497 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3498 * out-of-sync. Make sure to update the required fields
3499 * before using them.
3500 *
3501 * @remarks No-long-jump zone!!!
3502 */
3503DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3504{
3505 /*
3506 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3507 */
3508 uint32_t uIntrState = 0;
3509 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3510 {
3511 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
3512 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3513 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3514 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3515 {
3516 if (pMixedCtx->eflags.Bits.u1IF)
3517 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3518 else
3519 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3520 }
3521 /* else: Although we can clear the force-flag here, let's keep this side-effect free. */
3522 }
3523
3524 /*
3525 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3526 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU; otherwise
3527 * setting this would block host NMIs and IRET would not clear the blocking.
3528 *
3529 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3530 */
3531 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3532 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3533 {
3534 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3535 }
3536
3537 return uIntrState;
3538}
3539
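/*
 * A typical case is a guest executing STI immediately followed by HLT: while RIP still points
 * at the instruction in the shadow, the function above reports blocking-by-STI (EFLAGS.IF is
 * set), so the pending interrupt is not injected one instruction too early.
 */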
3540
3541/**
3542 * Loads the guest's interruptibility-state into the guest-state area in the
3543 * VMCS.
3544 *
3545 * @returns VBox status code.
3546 * @param pVCpu Pointer to the VMCPU.
3547 * @param uIntrState The interruptibility-state to set.
3548 */
3549static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3550{
3551 NOREF(pVCpu);
3552 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
3553 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3554 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
3555 AssertRCReturn(rc, rc);
3556 return rc;
3557}
3558
3559
3560/**
3561 * Loads the exception intercepts required for guest execution into the VMCS.
3562 *
3563 * @returns VBox status code.
3564 * @param pVCpu Pointer to the VMCPU.
3565 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3566 * out-of-sync. Make sure to update the required fields
3567 * before using them.
3568 */
3569static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3570{
3571 NOREF(pMixedCtx);
3572 int rc = VINF_SUCCESS;
3573 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
3574 {
3575 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
3576 if (pVCpu->hm.s.fGIMTrapXcptUD)
3577 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
3578 else
3579 {
3580#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3581 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3582#endif
3583 }
3584
3585 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3586 AssertRCReturn(rc, rc);
3587
3588 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3589 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
3590 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
3591 }
3592 return rc;
3593}
3594
3595
3596/**
3597 * Loads the guest's RIP into the guest-state area in the VMCS.
3598 *
3599 * @returns VBox status code.
3600 * @param pVCpu Pointer to the VMCPU.
3601 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3602 * out-of-sync. Make sure to update the required fields
3603 * before using them.
3604 *
3605 * @remarks No-long-jump zone!!!
3606 */
3607static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3608{
3609 int rc = VINF_SUCCESS;
3610 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3611 {
3612 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3613 AssertRCReturn(rc, rc);
3614
3615 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3616 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
3617 HMCPU_CF_VALUE(pVCpu)));
3618 }
3619 return rc;
3620}
3621
3622
3623/**
3624 * Loads the guest's RSP into the guest-state area in the VMCS.
3625 *
3626 * @returns VBox status code.
3627 * @param pVCpu Pointer to the VMCPU.
3628 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3629 * out-of-sync. Make sure to update the required fields
3630 * before using them.
3631 *
3632 * @remarks No-long-jump zone!!!
3633 */
3634static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3635{
3636 int rc = VINF_SUCCESS;
3637 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3638 {
3639 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3640 AssertRCReturn(rc, rc);
3641
3642 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3643 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
3644 }
3645 return rc;
3646}
3647
3648
3649/**
3650 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
3651 *
3652 * @returns VBox status code.
3653 * @param pVCpu Pointer to the VMCPU.
3654 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3655 * out-of-sync. Make sure to update the required fields
3656 * before using them.
3657 *
3658 * @remarks No-long-jump zone!!!
3659 */
3660static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3661{
3662 int rc = VINF_SUCCESS;
3663 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3664 {
3665 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32 bits of RFLAGS are reserved (MBZ).
3666 Let us assert it as such and use 32-bit VMWRITE. */
3667 Assert(!(pMixedCtx->rflags.u64 >> 32));
3668 X86EFLAGS Eflags = pMixedCtx->eflags;
3669 /** @todo r=bird: There shall be no need to OR in X86_EFL_1 here, nor
3670 * shall there be any reason for clearing bits 63:22, 15, 5 and 3.
3671 * These will never be cleared/set, unless some other part of the VMM
3672 * code is buggy - in which case we're better off finding and fixing
3673 * those bugs than hiding them. */
3674 Assert(Eflags.u32 & X86_EFL_RA1_MASK);
3675 Assert(!(Eflags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3676 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
3677 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
3678
3679 /*
3680 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
3681 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
3682 */
3683 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3684 {
3685 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3686 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3687 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3688 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3689 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3690 }
3691
3692 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
3693 AssertRCReturn(rc, rc);
3694
3695 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3696 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
3697 }
3698 return rc;
3699}
3700
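/*
 * Worked example for the real-on-v86 path above (hypothetical value): a real-mode guest
 * EFLAGS of 0x00000202 (IF set, IOPL 0) is saved unmodified in RealMode.Eflags and loaded
 * into the VMCS as 0x00020202, i.e. with the VM bit (bit 17) set so the code runs under
 * virtual-8086 mode and IOPL left at 0 so privileged instructions still fault.
 */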
3701
3702/**
3703 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
3704 *
3705 * @returns VBox status code.
3706 * @param pVCpu Pointer to the VMCPU.
3707 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3708 * out-of-sync. Make sure to update the required fields
3709 * before using them.
3710 *
3711 * @remarks No-long-jump zone!!!
3712 */
3713DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3714{
3715 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3716 AssertRCReturn(rc, rc);
3717 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3718 AssertRCReturn(rc, rc);
3719 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3720 AssertRCReturn(rc, rc);
3721 return rc;
3722}
3723
3724
3725/**
3726 * Loads the guest CR0 control register into the guest-state area in the VMCS.
3727 * CR0 is partially shared with the host and we have to consider the FPU bits.
3728 *
3729 * @returns VBox status code.
3731 * @param pVCpu Pointer to the VMCPU.
3732 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3733 * out-of-sync. Make sure to update the required fields
3734 * before using them.
3735 *
3736 * @remarks No-long-jump zone!!!
3737 */
3738static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3739{
3740 /*
3741 * Guest CR0.
3742 * Guest FPU.
3743 */
3744 int rc = VINF_SUCCESS;
3745 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3746 {
3747 Assert(!(pMixedCtx->cr0 >> 32));
3748 uint32_t u32GuestCR0 = pMixedCtx->cr0;
3749 PVM pVM = pVCpu->CTX_SUFF(pVM);
3750
3751 /* The guest's view (read access) of its CR0 is unblemished. */
3752 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
3753 AssertRCReturn(rc, rc);
3754 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
3755
3756 /* Setup VT-x's view of the guest CR0. */
3757 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
3758 if (pVM->hm.s.fNestedPaging)
3759 {
3760 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3761 {
3762 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3763 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3764 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3765 }
3766 else
3767 {
3768 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3769 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3770 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3771 }
3772
3773 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3774 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3775 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3776
3777 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3778 AssertRCReturn(rc, rc);
3779 }
3780 else
3781 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3782
3783 /*
3784 * Guest FPU bits.
3785 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
3786 * CPUs to support VT-x, and the VM-entry checks make no exception to this with regards to UX.
3787 */
3788 u32GuestCR0 |= X86_CR0_NE;
3789 bool fInterceptNM = false;
3790 if (CPUMIsGuestFPUStateActive(pVCpu))
3791 {
3792 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3793 /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
3794 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3795 }
3796 else
3797 {
3798 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3799 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3800 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3801 }
3802
3803 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3804 bool fInterceptMF = false;
3805 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3806 fInterceptMF = true;
3807
3808 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3809 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3810 {
3811 Assert(PDMVmmDevHeapIsEnabled(pVM));
3812 Assert(pVM->hm.s.vmx.pRealModeTSS);
3813 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3814 fInterceptNM = true;
3815 fInterceptMF = true;
3816 }
3817 else
3818 {
3819 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626} comment #11. */
3820 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3821 }
3822 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3823
3824 if (fInterceptNM)
3825 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3826 else
3827 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3828
3829 if (fInterceptMF)
3830 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3831 else
3832 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3833
3834 /* Additional intercepts for debugging, define these yourself explicitly. */
3835#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3836 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3837 | RT_BIT(X86_XCPT_BP)
3838 | RT_BIT(X86_XCPT_DB)
3839 | RT_BIT(X86_XCPT_DE)
3840 | RT_BIT(X86_XCPT_NM)
3841 | RT_BIT(X86_XCPT_TS)
3842 | RT_BIT(X86_XCPT_UD)
3843 | RT_BIT(X86_XCPT_NP)
3844 | RT_BIT(X86_XCPT_SS)
3845 | RT_BIT(X86_XCPT_GP)
3846 | RT_BIT(X86_XCPT_PF)
3847 | RT_BIT(X86_XCPT_MF)
3848 ;
3849#elif defined(HMVMX_ALWAYS_TRAP_PF)
3850 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3851#endif
3852
3853 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3854
3855 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3856 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3857 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3858 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3859 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3860 else
3861 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3862
3863 u32GuestCR0 |= uSetCR0;
3864 u32GuestCR0 &= uZapCR0;
3865 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3866
3867 /* Write VT-x's view of the guest CR0 into the VMCS. */
3868 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3869 AssertRCReturn(rc, rc);
3870 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
3871 uZapCR0));
3872
3873 /*
3874 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3875 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3876 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3877 */
3878 uint32_t u32CR0Mask = 0;
3879 u32CR0Mask = X86_CR0_PE
3880 | X86_CR0_NE
3881 | X86_CR0_WP
3882 | X86_CR0_PG
3883 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3884 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3885 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3886
3887 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3888 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3889 * and @bugref{6944}. */
3890#if 0
3891 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3892 u32CR0Mask &= ~X86_CR0_PE;
3893#endif
3894 if (pVM->hm.s.fNestedPaging)
3895 u32CR0Mask &= ~X86_CR0_WP;
3896
3897 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3898 if (fInterceptNM)
3899 {
3900 u32CR0Mask |= X86_CR0_TS
3901 | X86_CR0_MP;
3902 }
3903
3904 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3905 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3906 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3907 AssertRCReturn(rc, rc);
3908 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
3909
3910 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3911 }
3912 return rc;
3913}
3914
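/*
 * A minimal sketch (not built; hypothetical helper name) of how the fixed-bit MSRs
 * constrain the CR0 value written to the VMCS above: bits set in both FIXED0 and FIXED1
 * must be 1, bits clear in both must be 0.
 */
#if 0
static uint32_t hmR0VmxExampleApplyCr0FixedBits(uint32_t uGuestCr0, uint64_t uCr0Fixed0, uint64_t uCr0Fixed1)
{
    uint32_t const uSet = (uint32_t)(uCr0Fixed0 & uCr0Fixed1);  /* Bits that must be 1. */
    uint32_t const uZap = (uint32_t)(uCr0Fixed0 | uCr0Fixed1);  /* Bits that may be 1. */
    return (uGuestCr0 | uSet) & uZap;
}
#endif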
3915
3916/**
3917 * Loads the guest control registers (CR3, CR4) into the guest-state area
3918 * in the VMCS.
3919 *
3920 * @returns VBox status code.
3922 * @param pVCpu Pointer to the VMCPU.
3923 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3924 * out-of-sync. Make sure to update the required fields
3925 * before using them.
3926 *
3927 * @remarks No-long-jump zone!!!
3928 */
3929static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3930{
3931 int rc = VINF_SUCCESS;
3932 PVM pVM = pVCpu->CTX_SUFF(pVM);
3933
3934 /*
3935 * Guest CR2.
3936 * It's always loaded in the assembler code. Nothing to do here.
3937 */
3938
3939 /*
3940 * Guest CR3.
3941 */
3942 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3943 {
3944 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3945 if (pVM->hm.s.fNestedPaging)
3946 {
3947 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3948
3949 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3950 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3951 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3952 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3953
3954 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3955 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3956 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
3957
3958 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3959 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3960 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3961 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3962
3963 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3964 AssertRCReturn(rc, rc);
3965 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
3966
3967 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3968 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3969 {
3970 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3971 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3972 {
3973 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3974 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3975 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3976 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3977 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3978 }
3979
3980 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3981 have Unrestricted Execution to handle the guest when it's not using paging. */
3982 GCPhysGuestCR3 = pMixedCtx->cr3;
3983 }
3984 else
3985 {
3986 /*
3987 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3988 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3989 * EPT takes care of translating it to host-physical addresses.
3990 */
3991 RTGCPHYS GCPhys;
3992 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3993 Assert(PDMVmmDevHeapIsEnabled(pVM));
3994
3995 /* We obtain it here every time as the guest could have relocated this PCI region. */
3996 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3997 AssertRCReturn(rc, rc);
3998
3999 GCPhysGuestCR3 = GCPhys;
4000 }
4001
4002 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
4003 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
4004 }
4005 else
4006 {
4007 /* Non-nested paging case, just use the hypervisor's CR3. */
4008 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
4009
4010 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
4011 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
4012 }
4013 AssertRCReturn(rc, rc);
4014
4015 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
4016 }
4017
4018 /*
4019 * Guest CR4.
4020 * ASSUMES this is done every time we get in from ring-3! (XCR0)
4021 */
4022 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
4023 {
4024 Assert(!(pMixedCtx->cr4 >> 32));
4025 uint32_t u32GuestCR4 = pMixedCtx->cr4;
4026
4027 /* The guest's view of its CR4 is unblemished. */
4028 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
4029 AssertRCReturn(rc, rc);
4030 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
4031
4032 /* Setup VT-x's view of the guest CR4. */
4033 /*
4034 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
4035 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
4036 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
4037 */
4038 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4039 {
4040 Assert(pVM->hm.s.vmx.pRealModeTSS);
4041 Assert(PDMVmmDevHeapIsEnabled(pVM));
4042 u32GuestCR4 &= ~X86_CR4_VME;
4043 }
4044
4045 if (pVM->hm.s.fNestedPaging)
4046 {
4047 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
4048 && !pVM->hm.s.vmx.fUnrestrictedGuest)
4049 {
4050 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
4051 u32GuestCR4 |= X86_CR4_PSE;
4052 /* Our identity mapping is a 32-bit page directory. */
4053 u32GuestCR4 &= ~X86_CR4_PAE;
4054 }
4055 /* else use guest CR4. */
4056 }
4057 else
4058 {
4059 /*
4060 * The shadow paging mode and the guest paging mode are different; the shadow is in accordance with the host
4061 * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
4062 */
4063 switch (pVCpu->hm.s.enmShadowMode)
4064 {
4065 case PGMMODE_REAL: /* Real-mode. */
4066 case PGMMODE_PROTECTED: /* Protected mode without paging. */
4067 case PGMMODE_32_BIT: /* 32-bit paging. */
4068 {
4069 u32GuestCR4 &= ~X86_CR4_PAE;
4070 break;
4071 }
4072
4073 case PGMMODE_PAE: /* PAE paging. */
4074 case PGMMODE_PAE_NX: /* PAE paging with NX. */
4075 {
4076 u32GuestCR4 |= X86_CR4_PAE;
4077 break;
4078 }
4079
4080 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4081 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4082#ifdef VBOX_ENABLE_64_BITS_GUESTS
4083 break;
4084#endif
4085 default:
4086 AssertFailed();
4087 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4088 }
4089 }
4090
4091 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4092 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4093 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4094 u32GuestCR4 |= uSetCR4;
4095 u32GuestCR4 &= uZapCR4;
4096
4097 /* Write VT-x's view of the guest CR4 into the VMCS. */
4098 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
4099 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
4100 AssertRCReturn(rc, rc);
4101
4102 /* Setup the CR4 mask. These CR4 flags are owned by the host; if the guest attempts to change them, a VM-exit occurs. */
4103 uint32_t u32CR4Mask = X86_CR4_VME
4104 | X86_CR4_PAE
4105 | X86_CR4_PGE
4106 | X86_CR4_PSE
4107 | X86_CR4_VMXE;
4108 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
4109 u32CR4Mask |= X86_CR4_OSXSAVE;
4110 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
4111 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
4112 AssertRCReturn(rc, rc);
4113
4114 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
4115 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
4116
4117 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
4118 }
4119 return rc;
4120}
4121
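/*
 * Worked example for the EPTP constructed above (hypothetical EPT PML4 address): with the
 * PML4 table at host-physical 0x0000000012345000, the EPTP written to the VMCS becomes
 * 0x000000001234501E - memory type WB (6) in bits 2:0 and a page-walk length of 4, encoded
 * as 3, in bits 5:3.
 */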
4122
4123/**
4124 * Loads the guest debug registers into the guest-state area in the VMCS.
4125 * This also sets up whether #DB and MOV DRx accesses cause VM-exits.
4126 *
4127 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4128 *
4129 * @returns VBox status code.
4130 * @param pVCpu Pointer to the VMCPU.
4131 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4132 * out-of-sync. Make sure to update the required fields
4133 * before using them.
4134 *
4135 * @remarks No-long-jump zone!!!
4136 */
4137static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4138{
4139 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4140 return VINF_SUCCESS;
4141
4142#ifdef VBOX_STRICT
4143 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4144 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4145 {
4146 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4147 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4148 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4149 }
4150#endif
4151
4152 int rc;
4153 PVM pVM = pVCpu->CTX_SUFF(pVM);
4154 bool fInterceptDB = false;
4155 bool fInterceptMovDRx = false;
4156 if ( pVCpu->hm.s.fSingleInstruction
4157 || DBGFIsStepping(pVCpu))
4158 {
4159 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4160 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4161 {
4162 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4163 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4164 AssertRCReturn(rc, rc);
4165 Assert(fInterceptDB == false);
4166 }
4167 else
4168 {
4169 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4170 pVCpu->hm.s.fClearTrapFlag = true;
4171 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4172 fInterceptDB = true;
4173 }
4174 }
4175
4176 if ( fInterceptDB
4177 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4178 {
4179 /*
4180 * Use the combined guest and host DRx values found in the hypervisor
4181 * register set because the debugger has breakpoints active or someone
4182 * is single stepping on the host side without a monitor trap flag.
4183 *
4184 * Note! DBGF expects a clean DR6 state before executing guest code.
4185 */
4186#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4187 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4188 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4189 {
4190 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4191 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4192 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4193 }
4194 else
4195#endif
4196 if (!CPUMIsHyperDebugStateActive(pVCpu))
4197 {
4198 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4199 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4200 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4201 }
4202
4203 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
4204 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4205 AssertRCReturn(rc, rc);
4206
4207 pVCpu->hm.s.fUsingHyperDR7 = true;
4208 fInterceptDB = true;
4209 fInterceptMovDRx = true;
4210 }
4211 else
4212 {
4213 /*
4214 * If the guest has enabled debug registers, we need to load them prior to
4215 * executing guest code so they'll trigger at the right time.
4216 */
4217 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
4218 {
4219#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4220 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4221 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4222 {
4223 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4224 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4225 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4226 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4227 }
4228 else
4229#endif
4230 if (!CPUMIsGuestDebugStateActive(pVCpu))
4231 {
4232 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4233 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4234 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4235 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4236 }
4237 Assert(!fInterceptDB);
4238 Assert(!fInterceptMovDRx);
4239 }
4240 /*
4241 * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4242 * must intercept #DB in order to maintain a correct DR6 guest value.
4243 */
4244#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4245 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4246 && !CPUMIsGuestDebugStateActive(pVCpu))
4247#else
4248 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4249#endif
4250 {
4251 fInterceptMovDRx = true;
4252 fInterceptDB = true;
4253 }
4254
4255 /* Update guest DR7. */
4256 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
4257 AssertRCReturn(rc, rc);
4258
4259 pVCpu->hm.s.fUsingHyperDR7 = false;
4260 }
4261
4262 /*
4263 * Update the exception bitmap regarding intercepting #DB generated by the guest.
4264 */
4265 if ( fInterceptDB
4266 || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4267 {
4268 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
4269 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
4270 }
4271 else
4272 {
4273#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
4274 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
4275 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
4276#endif
4277 }
4278
4279 /*
4280 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
4281 */
4282 if (fInterceptMovDRx)
4283 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4284 else
4285 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4286 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4287 AssertRCReturn(rc, rc);
4288
4289 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4290 return VINF_SUCCESS;
4291}
4292
4293
4294#ifdef VBOX_STRICT
4295/**
4296 * Strict function to validate segment registers.
4297 *
4298 * @remarks ASSUMES CR0 is up to date.
4299 */
4300static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4301{
4302 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4303 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
4304 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
4305 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4306 && ( !CPUMIsGuestInRealModeEx(pCtx)
4307 && !CPUMIsGuestInV86ModeEx(pCtx)))
4308 {
4309 /* Protected mode checks */
4310 /* CS */
4311 Assert(pCtx->cs.Attr.n.u1Present);
4312 Assert(!(pCtx->cs.Attr.u & 0xf00));
4313 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4314 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4315 || !(pCtx->cs.Attr.n.u1Granularity));
4316 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4317 || (pCtx->cs.Attr.n.u1Granularity));
4318 /* CS cannot be loaded with NULL in protected mode. */
4319 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
4320 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4321 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4322 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4323 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4324 else
4325 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4326 /* SS */
4327 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4328 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4329 if ( !(pCtx->cr0 & X86_CR0_PE)
4330 || pCtx->cs.Attr.n.u4Type == 3)
4331 {
4332 Assert(!pCtx->ss.Attr.n.u2Dpl);
4333 }
4334 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4335 {
4336 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4337 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4338 Assert(pCtx->ss.Attr.n.u1Present);
4339 Assert(!(pCtx->ss.Attr.u & 0xf00));
4340 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4341 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4342 || !(pCtx->ss.Attr.n.u1Granularity));
4343 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4344 || (pCtx->ss.Attr.n.u1Granularity));
4345 }
4346 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
4347 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4348 {
4349 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4350 Assert(pCtx->ds.Attr.n.u1Present);
4351 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4352 Assert(!(pCtx->ds.Attr.u & 0xf00));
4353 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4354 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4355 || !(pCtx->ds.Attr.n.u1Granularity));
4356 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4357 || (pCtx->ds.Attr.n.u1Granularity));
4358 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4359 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4360 }
4361 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4362 {
4363 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4364 Assert(pCtx->es.Attr.n.u1Present);
4365 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4366 Assert(!(pCtx->es.Attr.u & 0xf00));
4367 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4368 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4369 || !(pCtx->es.Attr.n.u1Granularity));
4370 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4371 || (pCtx->es.Attr.n.u1Granularity));
4372 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4373 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4374 }
4375 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4376 {
4377 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4378 Assert(pCtx->fs.Attr.n.u1Present);
4379 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4380 Assert(!(pCtx->fs.Attr.u & 0xf00));
4381 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4382 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4383 || !(pCtx->fs.Attr.n.u1Granularity));
4384 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4385 || (pCtx->fs.Attr.n.u1Granularity));
4386 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4387 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4388 }
4389 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4390 {
4391 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4392 Assert(pCtx->gs.Attr.n.u1Present);
4393 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4394 Assert(!(pCtx->gs.Attr.u & 0xf00));
4395 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4396 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4397 || !(pCtx->gs.Attr.n.u1Granularity));
4398 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4399 || (pCtx->gs.Attr.n.u1Granularity));
4400 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4401 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4402 }
4403 /* 64-bit capable CPUs. */
4404# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4405 if (HMVMX_IS_64BIT_HOST_MODE())
4406 {
4407 Assert(!(pCtx->cs.u64Base >> 32));
4408 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
4409 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
4410 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
4411 }
4412# endif
4413 }
4414 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4415 || ( CPUMIsGuestInRealModeEx(pCtx)
4416 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4417 {
4418 /* Real and v86 mode checks. */
4419 /* hmR0VmxWriteSegmentReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
4420 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4421 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4422 {
4423 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4424 }
4425 else
4426 {
4427 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4428 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4429 }
4430
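             /* Note: in real and v86 mode a segment's base is always its selector shifted left by 4
                (e.g. selector 0x1234 => base 0x12340), the limit is 0xffff and the access rights are
                expected to be 0xf3 (the value our real-on-v86 hack uses); the checks below verify that. */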
4431 /* CS */
4432 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4433 Assert(pCtx->cs.u32Limit == 0xffff);
4434 Assert(u32CSAttr == 0xf3);
4435 /* SS */
4436 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4437 Assert(pCtx->ss.u32Limit == 0xffff);
4438 Assert(u32SSAttr == 0xf3);
4439 /* DS */
4440 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4441 Assert(pCtx->ds.u32Limit == 0xffff);
4442 Assert(u32DSAttr == 0xf3);
4443 /* ES */
4444 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4445 Assert(pCtx->es.u32Limit == 0xffff);
4446 Assert(u32ESAttr == 0xf3);
4447 /* FS */
4448 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4449 Assert(pCtx->fs.u32Limit == 0xffff);
4450 Assert(u32FSAttr == 0xf3);
4451 /* GS */
4452 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4453 Assert(pCtx->gs.u32Limit == 0xffff);
4454 Assert(u32GSAttr == 0xf3);
4455 /* 64-bit capable CPUs. */
4456# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4457 if (HMVMX_IS_64BIT_HOST_MODE())
4458 {
4459 Assert(!(pCtx->cs.u64Base >> 32));
4460 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
4461 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
4462 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
4463 }
4464# endif
4465 }
4466}
4467#endif /* VBOX_STRICT */
4468
4469
4470/**
4471 * Writes a guest segment register into the guest-state area in the VMCS.
4472 *
4473 * @returns VBox status code.
4474 * @param pVCpu Pointer to the VMCPU.
4475 * @param idxSel Index of the selector in the VMCS.
4476 * @param idxLimit Index of the segment limit in the VMCS.
4477 * @param idxBase Index of the segment base in the VMCS.
4478 * @param idxAccess Index of the access rights of the segment in the VMCS.
4479 * @param pSelReg Pointer to the segment selector.
4480 *
4481 * @remarks No-long-jump zone!!!
4482 */
4483static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4484 uint32_t idxAccess, PCPUMSELREG pSelReg)
4485{
4486 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4487 AssertRCReturn(rc, rc);
4488 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4489 AssertRCReturn(rc, rc);
4490 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4491 AssertRCReturn(rc, rc);
4492
4493 uint32_t u32Access = pSelReg->Attr.u;
4494 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4495 {
4496 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4497 u32Access = 0xf3;
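        /* 0xf3 = present, DPL=3, code/data (non-system), type 3 (read/write, accessed data segment),
           i.e. an ordinary ring-3 data segment as seen in virtual-8086 mode. */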
4498 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4499 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4500 }
4501 else
4502 {
4503 /*
4504          * The way to differentiate between a genuine null selector and a selector merely loaded with 0 in
4505          * real-mode is by the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
4506          * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that null
4507          * selectors loaded in protected-mode have their attributes set to 0.
4508 */
4509 if (!u32Access)
4510 u32Access = X86DESCATTR_UNUSABLE;
4511 }
4512
4513 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4514 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4515               ("Access bit not set for usable segment. idx=%#x sel=%#x attr=%#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
4516
4517 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4518 AssertRCReturn(rc, rc);
4519 return rc;
4520}
4521
4522
4523/**
4524 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4525 * into the guest-state area in the VMCS.
4526 *
4527 * @returns VBox status code.
4528  * @param   pVCpu       Pointer to the VMCPU.
4530 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4531 * out-of-sync. Make sure to update the required fields
4532 * before using them.
4533 *
4534 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
4535 * @remarks No-long-jump zone!!!
4536 */
4537static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4538{
4539 int rc = VERR_INTERNAL_ERROR_5;
4540 PVM pVM = pVCpu->CTX_SUFF(pVM);
4541
4542 /*
4543 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4544 */
4545 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4546 {
4547 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
4548 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4549 {
4550 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4551 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4552 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4553 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4554 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4555 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4556 }
4557
4558#ifdef VBOX_WITH_REM
4559 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4560 {
4561 Assert(pVM->hm.s.vmx.pRealModeTSS);
4562 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4563 if ( pVCpu->hm.s.vmx.fWasInRealMode
4564 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4565 {
4566 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4567 in real-mode (e.g. OpenBSD 4.0) */
4568 REMFlushTBs(pVM);
4569 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
4570 pVCpu->hm.s.vmx.fWasInRealMode = false;
4571 }
4572 }
4573#endif
4574 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4575 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
4576 AssertRCReturn(rc, rc);
4577 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4578 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
4579 AssertRCReturn(rc, rc);
4580 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4581 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
4582 AssertRCReturn(rc, rc);
4583 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4584 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
4585 AssertRCReturn(rc, rc);
4586 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4587 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
4588 AssertRCReturn(rc, rc);
4589 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4590 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
4591 AssertRCReturn(rc, rc);
4592
4593#ifdef VBOX_STRICT
4594 /* Validate. */
4595 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4596#endif
4597
4598 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4599 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
4600 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4601 }
4602
4603 /*
4604 * Guest TR.
4605 */
4606 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4607 {
4608 /*
4609 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
4610 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
4611 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4612 */
4613 uint16_t u16Sel = 0;
4614 uint32_t u32Limit = 0;
4615 uint64_t u64Base = 0;
4616 uint32_t u32AccessRights = 0;
4617
4618 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4619 {
4620 u16Sel = pMixedCtx->tr.Sel;
4621 u32Limit = pMixedCtx->tr.u32Limit;
4622 u64Base = pMixedCtx->tr.u64Base;
4623 u32AccessRights = pMixedCtx->tr.Attr.u;
4624 }
4625 else
4626 {
4627 Assert(pVM->hm.s.vmx.pRealModeTSS);
4628 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4629
4630 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4631 RTGCPHYS GCPhys;
4632 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4633 AssertRCReturn(rc, rc);
4634
4635 X86DESCATTR DescAttr;
4636 DescAttr.u = 0;
4637 DescAttr.n.u1Present = 1;
4638 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4639
4640 u16Sel = 0;
4641 u32Limit = HM_VTX_TSS_SIZE;
4642 u64Base = GCPhys; /* in real-mode phys = virt. */
4643 u32AccessRights = DescAttr.u;
4644 }
4645
4646 /* Validate. */
4647 Assert(!(u16Sel & RT_BIT(2)));
4648 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4649 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4650 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4651 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4652 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4653 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4654 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4655 Assert( (u32Limit & 0xfff) == 0xfff
4656 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4657 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4658 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4659
4660 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
4661 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
4662 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
4663 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
4664
4665 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4666 Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
4667 }
4668
4669 /*
4670 * Guest GDTR.
4671 */
4672 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4673 {
4674 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
4675 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
4676
4677 /* Validate. */
4678 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4679
4680 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4681 Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
4682 }
4683
4684 /*
4685 * Guest LDTR.
4686 */
4687 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4688 {
4689 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
4690 uint32_t u32Access = 0;
4691 if (!pMixedCtx->ldtr.Attr.u)
4692 u32Access = X86DESCATTR_UNUSABLE;
4693 else
4694 u32Access = pMixedCtx->ldtr.Attr.u;
4695
4696 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
4697 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
4698 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
4699 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
4700
4701 /* Validate. */
4702 if (!(u32Access & X86DESCATTR_UNUSABLE))
4703 {
4704 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4705 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4706 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4707 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4708 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4709 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4710 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4711 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4712 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4713 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4714 }
4715
4716 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4717 Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
4718 }
4719
4720 /*
4721 * Guest IDTR.
4722 */
4723 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4724 {
4725 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
4726 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
4727
4728 /* Validate. */
4729 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4730
4731 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4732 Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
4733 }
4734
4735 return VINF_SUCCESS;
4736}
4737
4738
4739/**
4740 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4741 * areas.
4742 *
4743 * These MSRs will automatically be loaded to the host CPU on every successful
4744 * VM-entry and stored from the host CPU on every successful VM-exit. This also
4745 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
4746 * -not- updated here for performance reasons. See hmR0VmxSaveHostMsrs().
4747 *
4748 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
4749 *
4750 * @returns VBox status code.
4751 * @param pVCpu Pointer to the VMCPU.
4752 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4753 * out-of-sync. Make sure to update the required fields
4754 * before using them.
4755 *
4756 * @remarks No-long-jump zone!!!
4757 */
4758static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4759{
4760 AssertPtr(pVCpu);
4761 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4762
4763 /*
4764     * MSRs for which we use the auto-load/store MSR area in the VMCS.
4765 */
4766 PVM pVM = pVCpu->CTX_SUFF(pVM);
4767 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4768 {
4769 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
4770#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4771 if (pVM->hm.s.fAllow64BitGuests)
4772 {
4773 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false /* fUpdateHostMsr */);
4774 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false /* fUpdateHostMsr */);
4775 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false /* fUpdateHostMsr */);
4776 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
4777# ifdef DEBUG
4778 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4779 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4780 {
4781 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
4782 pMsr->u64Value));
4783 }
4784# endif
4785 }
4786#endif
4787 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4788 }
4789
4790 /*
4791 * Guest Sysenter MSRs.
4792 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4793 * VM-exits on WRMSRs for these MSRs.
4794 */
4795 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4796 {
4797 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4798 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4799 }
4800
4801 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4802 {
4803 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4804 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4805 }
4806
4807 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4808 {
4809 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4810 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4811 }
4812
4813 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4814 {
4815 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4816 {
4817 /*
4818 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4819 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4820 */
4821 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4822 {
4823 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4824 AssertRCReturn(rc,rc);
4825 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
4826 }
4827 else
4828 {
4829 hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
4830 /* We need to intercept reads too, see @bugref{7386} comment #16. */
4831 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4832 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
4833 pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
4834 }
4835 }
4836 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4837 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4838 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4839 }
4840
4841 return VINF_SUCCESS;
4842}
4843
4844
4845/**
4846 * Loads the guest activity state into the guest-state area in the VMCS.
4847 *
4848 * @returns VBox status code.
4849 * @param pVCpu Pointer to the VMCPU.
4850  * @param   pCtx        Pointer to the guest-CPU context. The data may be
4851 * out-of-sync. Make sure to update the required fields
4852 * before using them.
4853 *
4854 * @remarks No-long-jump zone!!!
4855 */
4856static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4857{
4858 NOREF(pCtx);
4859 /** @todo See if we can make use of other states, e.g.
4860 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4861 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4862 {
4863 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4864 AssertRCReturn(rc, rc);
4865
4866 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4867 }
4868 return VINF_SUCCESS;
4869}
4870
4871
4872/**
4873 * Sets up the appropriate function to run guest code.
4874 *
4875 * @returns VBox status code.
4876 * @param pVCpu Pointer to the VMCPU.
4877 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4878 * out-of-sync. Make sure to update the required fields
4879 * before using them.
4880 *
4881 * @remarks No-long-jump zone!!!
4882 */
4883static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4884{
4885 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4886 {
4887#ifndef VBOX_ENABLE_64_BITS_GUESTS
4888 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4889#endif
4890 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4891#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4892 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4893 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4894 {
4895 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4896 {
4897                /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4898 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4899 | HM_CHANGED_VMX_ENTRY_CTLS
4900 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4901 }
4902 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4903 }
4904#else
4905 /* 64-bit host or hybrid host. */
4906 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4907#endif
4908 }
4909 else
4910 {
4911 /* Guest is not in long mode, use the 32-bit handler. */
4912#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4913 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4914 {
4915 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4916 {
4917                /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4918 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4919 | HM_CHANGED_VMX_ENTRY_CTLS
4920 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4921 }
4922 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4923 }
4924#else
4925 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4926#endif
4927 }
4928 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4929 return VINF_SUCCESS;
4930}
4931
4932
4933/**
4934 * Wrapper for running the guest code in VT-x.
4935 *
4936 * @returns VBox strict status code.
4937 * @param pVM Pointer to the VM.
4938 * @param pVCpu Pointer to the VMCPU.
4939 * @param pCtx Pointer to the guest-CPU context.
4940 *
4941 * @remarks No-long-jump zone!!!
4942 */
4943DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4944{
4945 /*
4946 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4947     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4948     * Refer to the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
4949 */
4950 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
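    /* A VMCS that is already in the "launched" state on this CPU must be entered with VMRESUME; otherwise VMLAUNCH is required. */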
4951 /** @todo Add stats for resume vs launch. */
4952#ifdef VBOX_WITH_KERNEL_USING_XMM
4953 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4954#else
4955 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4956#endif
4957}
4958
4959
4960/**
4961 * Reports world-switch error and dumps some useful debug info.
4962 *
4963 * @param pVM Pointer to the VM.
4964 * @param pVCpu Pointer to the VMCPU.
4965 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4966 * @param pCtx Pointer to the guest-CPU context.
4967 * @param pVmxTransient Pointer to the VMX transient structure (only
4968 * exitReason updated).
4969 */
4970static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4971{
4972 Assert(pVM);
4973 Assert(pVCpu);
4974 Assert(pCtx);
4975 Assert(pVmxTransient);
4976 HMVMX_ASSERT_PREEMPT_SAFE();
4977
4978 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4979 switch (rcVMRun)
4980 {
4981 case VERR_VMX_INVALID_VMXON_PTR:
4982 AssertFailed();
4983 break;
4984 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4985 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4986 {
4987 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4988 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4989 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4990 AssertRC(rc);
4991
4992 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4993 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4994 Cannot do it here as we may have been long preempted. */
4995
4996#ifdef VBOX_STRICT
4997 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4998 pVmxTransient->uExitReason));
4999 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
5000 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
5001 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
5002 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
5003 else
5004 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
5005 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
5006 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
5007
5008 /* VMX control bits. */
5009 uint32_t u32Val;
5010 uint64_t u64Val;
5011 HMVMXHCUINTREG uHCReg;
5012 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
5013 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
5014 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
5015 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
5016 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
5017 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
5018 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
5019 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
5020 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
5021 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
5022 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
5023 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
5024 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
5025 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
5026 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
5027 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
5028 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
5029 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
5030 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
5031 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
5032 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
5033 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
5034 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5035 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
5036 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5037 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
5038 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
5039 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
5040 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
5041 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
5042 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
5043 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
5044 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
5045 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
5046 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
5047             Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
5048 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
5049 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
5050 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
5051 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5052 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
5053 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
5054
5055 /* Guest bits. */
5056 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
5057 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
5058 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
5059 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
5060 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
5061 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
5062 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
5063 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
5064
5065 /* Host bits. */
5066 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
5067 Log4(("Host CR0 %#RHr\n", uHCReg));
5068 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
5069 Log4(("Host CR3 %#RHr\n", uHCReg));
5070 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
5071 Log4(("Host CR4 %#RHr\n", uHCReg));
5072
5073 RTGDTR HostGdtr;
5074 PCX86DESCHC pDesc;
5075 ASMGetGDTR(&HostGdtr);
5076 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
5077 Log4(("Host CS %#08x\n", u32Val));
5078 if (u32Val < HostGdtr.cbGdt)
5079 {
5080 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5081 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
5082 }
5083
5084 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
5085 Log4(("Host DS %#08x\n", u32Val));
5086 if (u32Val < HostGdtr.cbGdt)
5087 {
5088 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5089 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
5090 }
5091
5092 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
5093 Log4(("Host ES %#08x\n", u32Val));
5094 if (u32Val < HostGdtr.cbGdt)
5095 {
5096 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5097 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
5098 }
5099
5100 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
5101 Log4(("Host FS %#08x\n", u32Val));
5102 if (u32Val < HostGdtr.cbGdt)
5103 {
5104 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5105 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
5106 }
5107
5108 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
5109 Log4(("Host GS %#08x\n", u32Val));
5110 if (u32Val < HostGdtr.cbGdt)
5111 {
5112 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5113 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
5114 }
5115
5116 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
5117 Log4(("Host SS %#08x\n", u32Val));
5118 if (u32Val < HostGdtr.cbGdt)
5119 {
5120 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5121 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
5122 }
5123
5124 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
5125 Log4(("Host TR %#08x\n", u32Val));
5126 if (u32Val < HostGdtr.cbGdt)
5127 {
5128 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5129 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
5130 }
5131
5132 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5133 Log4(("Host TR Base %#RHv\n", uHCReg));
5134 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5135 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5136 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5137 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5138 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5139 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5140 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5141 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5142 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5143 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5144 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5145 Log4(("Host RSP %#RHv\n", uHCReg));
5146 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5147 Log4(("Host RIP %#RHv\n", uHCReg));
5148# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5149 if (HMVMX_IS_64BIT_HOST_MODE())
5150 {
5151 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5152 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5153 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5154 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5155 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5156 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5157 }
5158# endif
5159#endif /* VBOX_STRICT */
5160 break;
5161 }
5162
5163 default:
5164 /* Impossible */
5165 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5166 break;
5167 }
5168 NOREF(pVM); NOREF(pCtx);
5169}
5170
5171
5172#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5173#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5174# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5175#endif
5176#ifdef VBOX_STRICT
5177static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5178{
5179 switch (idxField)
5180 {
5181 case VMX_VMCS_GUEST_RIP:
5182 case VMX_VMCS_GUEST_RSP:
5183 case VMX_VMCS_GUEST_SYSENTER_EIP:
5184 case VMX_VMCS_GUEST_SYSENTER_ESP:
5185 case VMX_VMCS_GUEST_GDTR_BASE:
5186 case VMX_VMCS_GUEST_IDTR_BASE:
5187 case VMX_VMCS_GUEST_CS_BASE:
5188 case VMX_VMCS_GUEST_DS_BASE:
5189 case VMX_VMCS_GUEST_ES_BASE:
5190 case VMX_VMCS_GUEST_FS_BASE:
5191 case VMX_VMCS_GUEST_GS_BASE:
5192 case VMX_VMCS_GUEST_SS_BASE:
5193 case VMX_VMCS_GUEST_LDTR_BASE:
5194 case VMX_VMCS_GUEST_TR_BASE:
5195 case VMX_VMCS_GUEST_CR3:
5196 return true;
5197 }
5198 return false;
5199}
5200
5201static bool hmR0VmxIsValidReadField(uint32_t idxField)
5202{
5203 switch (idxField)
5204 {
5205 /* Read-only fields. */
5206 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5207 return true;
5208 }
5209 /* Remaining readable fields should also be writable. */
5210 return hmR0VmxIsValidWriteField(idxField);
5211}
5212#endif /* VBOX_STRICT */
5213
5214
5215/**
5216 * Executes the specified handler in 64-bit mode.
5217 *
5218 * @returns VBox status code.
5219 * @param pVM Pointer to the VM.
5220 * @param pVCpu Pointer to the VMCPU.
5221 * @param pCtx Pointer to the guest CPU context.
5222 * @param enmOp The operation to perform.
5223 * @param cParams Number of parameters.
5224 * @param paParam Array of 32-bit parameters.
5225 */
5226VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
5227 uint32_t cParams, uint32_t *paParam)
5228{
5229 int rc, rc2;
5230 PHMGLOBALCPUINFO pCpu;
5231 RTHCPHYS HCPhysCpuPage;
5232 RTCCUINTREG uOldEflags;
5233
5234 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5235 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5236 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5237 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5238
5239#ifdef VBOX_STRICT
5240 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5241 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5242
5243    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5244 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5245#endif
5246
5247 /* Disable interrupts. */
5248 uOldEflags = ASMIntDisableFlags();
5249
5250#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5251 RTCPUID idHostCpu = RTMpCpuId();
5252 CPUMR0SetLApic(pVCpu, idHostCpu);
5253#endif
5254
5255 pCpu = HMR0GetCurrentCpu();
5256 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5257
5258 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5259 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5260
5261 /* Leave VMX Root Mode. */
5262 VMXDisable();
5263
5264 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5265
5266 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5267 CPUMSetHyperEIP(pVCpu, enmOp);
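    /* Push the parameters in reverse order so that paParam[0] ends up on top of the hypervisor stack. */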
5268 for (int i = (int)cParams - 1; i >= 0; i--)
5269 CPUMPushHyper(pVCpu, paParam[i]);
5270
5271 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5272
5273 /* Call the switcher. */
5274 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5275 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5276
5277 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5278 /* Make sure the VMX instructions don't cause #UD faults. */
5279 SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
5280
5281 /* Re-enter VMX Root Mode */
5282 rc2 = VMXEnable(HCPhysCpuPage);
5283 if (RT_FAILURE(rc2))
5284 {
5285 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5286 ASMSetFlags(uOldEflags);
5287 return rc2;
5288 }
5289
5290 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5291 AssertRC(rc2);
5292 Assert(!(ASMGetFlags() & X86_EFL_IF));
5293 ASMSetFlags(uOldEflags);
5294 return rc;
5295}
5296
5297
5298/**
5299 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5300 * supporting 64-bit guests.
5301 *
5302 * @returns VBox status code.
5303 * @param fResume Whether to VMLAUNCH or VMRESUME.
5304 * @param pCtx Pointer to the guest-CPU context.
5305 * @param pCache Pointer to the VMCS cache.
5306 * @param pVM Pointer to the VM.
5307 * @param pVCpu Pointer to the VMCPU.
5308 */
5309DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5310{
5311 PHMGLOBALCPUINFO pCpu = NULL;
5312 RTHCPHYS HCPhysCpuPage = 0;
5313 int rc = VERR_INTERNAL_ERROR_5;
5314
5315 pCpu = HMR0GetCurrentCpu();
5316 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5317
5318#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5319 pCache->uPos = 1;
5320 pCache->interPD = PGMGetInterPaeCR3(pVM);
5321 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5322#endif
5323
5324#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5325 pCache->TestIn.HCPhysCpuPage = 0;
5326 pCache->TestIn.HCPhysVmcs = 0;
5327 pCache->TestIn.pCache = 0;
5328 pCache->TestOut.HCPhysVmcs = 0;
5329 pCache->TestOut.pCache = 0;
5330 pCache->TestOut.pCtx = 0;
5331 pCache->TestOut.eflags = 0;
5332#endif
5333
5334 uint32_t aParam[10];
5335 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5336 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5337 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5338 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
5339 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5340 aParam[5] = 0;
5341 aParam[6] = VM_RC_ADDR(pVM, pVM);
5342 aParam[7] = 0;
5343 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5344 aParam[9] = 0;
5345
5346#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5347 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5348 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5349#endif
5350 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5351
5352#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5353 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5354 Assert(pCtx->dr[4] == 10);
5355 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5356#endif
5357
5358#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5359 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5360 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5361 pVCpu->hm.s.vmx.HCPhysVmcs));
5362 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5363 pCache->TestOut.HCPhysVmcs));
5364 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5365 pCache->TestOut.pCache));
5366 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5367 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5368 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5369 pCache->TestOut.pCtx));
5370 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5371#endif
5372 return rc;
5373}
5374
5375
5376/**
5377 * Initialize the VMCS-Read cache.
5378 *
5379 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
5380 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
5381 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
5382 * (those that have a 32-bit FULL & HIGH part).
5383 *
5384 * @returns VBox status code.
5385 * @param pVM Pointer to the VM.
5386 * @param pVCpu Pointer to the VMCPU.
5387 */
5388static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5389{
5390#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5391{ \
5392 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5393 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5394 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5395 ++cReadFields; \
5396}
5397
5398 AssertPtr(pVM);
5399 AssertPtr(pVCpu);
5400 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5401 uint32_t cReadFields = 0;
5402
5403 /*
5404 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5405 * and serve to indicate exceptions to the rules.
5406 */
5407
5408 /* Guest-natural selector base fields. */
5409#if 0
5410 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5411 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5412 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5413#endif
5414 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5415 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5416 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5417 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5418 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5419 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5420 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5421 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5422 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5423 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5424 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5425 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5426#if 0
5427 /* Unused natural width guest-state fields. */
5428 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5429 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5430#endif
5431 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5432 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5433
5434 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
5435#if 0
5436 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5437 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5438 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5439 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5440 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5441 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5442 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5443 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5444 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5445#endif
5446
5447 /* Natural width guest-state fields. */
5448 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5449#if 0
5450 /* Currently unused field. */
5451 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5452#endif
5453
5454 if (pVM->hm.s.fNestedPaging)
5455 {
5456 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5457 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5458 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5459 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5460 }
5461 else
5462 {
5463 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5464 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5465 }
5466
5467#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5468 return VINF_SUCCESS;
5469}
5470
5471
5472/**
5473 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5474 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5475 * darwin, running 64-bit guests).
5476 *
5477 * @returns VBox status code.
5478 * @param pVCpu Pointer to the VMCPU.
5479 * @param idxField The VMCS field encoding.
5480 * @param u64Val 16, 32 or 64-bit value.
5481 */
5482VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5483{
5484 int rc;
5485 switch (idxField)
5486 {
5487 /*
5488         * These fields consist of a "FULL" and a "HIGH" part, each of which can be written to individually.
5489 */
5490 /* 64-bit Control fields. */
5491 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5492 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5493 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5494 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5495 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5496 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5497 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5498 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5499 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5500 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5501 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5502 case VMX_VMCS64_CTRL_EPTP_FULL:
5503 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5504 /* 64-bit Guest-state fields. */
5505 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5506 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5507 case VMX_VMCS64_GUEST_PAT_FULL:
5508 case VMX_VMCS64_GUEST_EFER_FULL:
5509 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5510 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5511 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5512 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5513 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5514 /* 64-bit Host-state fields. */
5515 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
5516 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
5517 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5518 {
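            /* The "HIGH" half of a 64-bit VMCS field has the encoding of the "FULL" half plus one, so the
               64-bit value can be written with two 32-bit VMWRITEs (low dword first, then the high dword). */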
5519 rc = VMXWriteVmcs32(idxField, u64Val);
5520 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
5521 break;
5522 }
5523
5524 /*
5525 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5526 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
5527 */
5528 /* Natural-width Guest-state fields. */
5529 case VMX_VMCS_GUEST_CR3:
5530 case VMX_VMCS_GUEST_ES_BASE:
5531 case VMX_VMCS_GUEST_CS_BASE:
5532 case VMX_VMCS_GUEST_SS_BASE:
5533 case VMX_VMCS_GUEST_DS_BASE:
5534 case VMX_VMCS_GUEST_FS_BASE:
5535 case VMX_VMCS_GUEST_GS_BASE:
5536 case VMX_VMCS_GUEST_LDTR_BASE:
5537 case VMX_VMCS_GUEST_TR_BASE:
5538 case VMX_VMCS_GUEST_GDTR_BASE:
5539 case VMX_VMCS_GUEST_IDTR_BASE:
5540 case VMX_VMCS_GUEST_RSP:
5541 case VMX_VMCS_GUEST_RIP:
5542 case VMX_VMCS_GUEST_SYSENTER_ESP:
5543 case VMX_VMCS_GUEST_SYSENTER_EIP:
5544 {
5545 if (!(u64Val >> 32))
5546 {
5547 /* If this field is 64-bit, VT-x will zero out the top bits. */
5548 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5549 }
5550 else
5551 {
5552 /* Assert that only the 32->64 switcher case should ever come here. */
5553 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5554 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5555 }
5556 break;
5557 }
5558
5559 default:
5560 {
5561 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5562 rc = VERR_INVALID_PARAMETER;
5563 break;
5564 }
5565 }
5566 AssertRCReturn(rc, rc);
5567 return rc;
5568}
5569
5570
5571/**
5572 * Queue up a VMWRITE by using the VMCS write cache.
5573 * This is only used on 32-bit hosts (except darwin) for 64-bit guests.
5574 *
5575 * @param pVCpu Pointer to the VMCPU.
5576 * @param idxField The VMCS field encoding.
5577 * @param u64Val 16, 32 or 64-bit value.
5578 */
5579VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5580{
5581 AssertPtr(pVCpu);
5582 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5583
5584 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5585 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5586
5587 /* Make sure there are no duplicates. */
5588 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5589 {
5590 if (pCache->Write.aField[i] == idxField)
5591 {
5592 pCache->Write.aFieldVal[i] = u64Val;
5593 return VINF_SUCCESS;
5594 }
5595 }
5596
5597 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5598 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5599 pCache->Write.cValidEntries++;
5600 return VINF_SUCCESS;
5601}
5602
5603/* Enable later when the assembly code uses these as callbacks. */
5604#if 0
5605/**
5606 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
5607 *
5608 * @param pVCpu Pointer to the VMCPU.
5609 * @param pCache Pointer to the VMCS cache.
5610 *
5611 * @remarks No-long-jump zone!!!
5612 */
5613VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
5614{
5615 AssertPtr(pCache);
5616 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5617 {
5618 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
5619 AssertRC(rc);
5620 }
5621 pCache->Write.cValidEntries = 0;
5622}
5623
5624
5625/**
5626 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
5627 *
5628 * @param pVCpu Pointer to the VMCPU.
5629 * @param pCache Pointer to the VMCS cache.
5630 *
5631 * @remarks No-long-jump zone!!!
5632 */
5633VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
5634{
5635 AssertPtr(pCache);
5636 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
5637 {
5638 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
5639 AssertRC(rc);
5640 }
5641}
5642#endif
5643#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
5644
5645
5646/**
5647 * Sets up the usage of TSC-offsetting and updates the VMCS.
5648 *
5649 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
5650 * VMX preemption timer.
5651 *
5652 * @returns VBox status code.
5653 * @param pVM Pointer to the cross context VM structure.
5654 * @param pVCpu Pointer to the VMCPU.
5655 *
5656 * @remarks No-long-jump zone!!!
5657 */
5658static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
5659{
5660 int rc;
5661 bool fOffsettedTsc;
5662 bool fParavirtTsc;
5663 if (pVM->hm.s.vmx.fUsePreemptTimer)
5664 {
5665 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
5666 &fOffsettedTsc, &fParavirtTsc);
5667
5668 /* Make sure the returned values have sane upper and lower boundaries. */
5669 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5670 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5671 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
5672 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5673
5674 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5675 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
5676 }
5677 else
5678 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5679
5680 /** @todo later optimize this to be done elsewhere and not before every
5681 * VM-entry. */
5682 if (fParavirtTsc)
5683 {
5684 rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5685 AssertRC(rc);
5686 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5687 }
5688
5689 if (fOffsettedTsc)
5690 {
5691 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5692 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5693
5694 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5695 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5696 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5697 }
5698 else
5699 {
5700 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5701 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5702 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5703 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5704 }
5705}
5706
5707
5708/**
5709 * Determines if an exception is a contributory exception.
5710 *
5711 * Contributory exceptions are ones which can cause double-faults unless the
5712 * original exception was a benign exception. Page-fault is intentionally not
5713 * included here as it's a conditional contributory exception.
5714 *
5715 * @returns true if the exception is contributory, false otherwise.
5716 * @param uVector The exception vector.
5717 */
5718DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
5719{
5720 switch (uVector)
5721 {
5722 case X86_XCPT_GP:
5723 case X86_XCPT_SS:
5724 case X86_XCPT_NP:
5725 case X86_XCPT_TS:
5726 case X86_XCPT_DE:
5727 return true;
5728 default:
5729 break;
5730 }
5731 return false;
5732}
5733
5734
5735/**
5736 * Sets an event as a pending event to be injected into the guest.
5737 *
5738 * @param pVCpu Pointer to the VMCPU.
5739 * @param u32IntInfo The VM-entry interruption-information field.
5740 * @param cbInstr The VM-entry instruction length in bytes (for software
5741 * interrupts, exceptions and privileged software
5742 * exceptions).
5743 * @param u32ErrCode The VM-entry exception error code.
5744 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5745 * page-fault.
5746 *
5747 * @remarks Statistics counter assumes this is a guest event being injected or
5748 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5749 * always incremented.
5750 */
5751DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5752 RTGCUINTPTR GCPtrFaultAddress)
5753{
5754 Assert(!pVCpu->hm.s.Event.fPending);
5755 pVCpu->hm.s.Event.fPending = true;
5756 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5757 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5758 pVCpu->hm.s.Event.cbInstr = cbInstr;
5759 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5760
5761 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5762}
5763
5764
5765/**
5766 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
5767 *
5768 * @param pVCpu Pointer to the VMCPU.
5769 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5770 * out-of-sync. Make sure to update the required fields
5771 * before using them.
5772 */
5773DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5774{
5775 NOREF(pMixedCtx);
5776 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5777 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5778 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5779 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5780}
5781
5782
5783/**
5784 * Handle a condition that occurred while delivering an event through the guest
5785 * IDT.
5786 *
5787 * @returns VBox status code (informational error codes included).
5788 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5789 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
5790 * continue execution of the guest which will delivery the #DF.
5791 *          continue execution of the guest which will deliver the #DF.
5792 *
5793 * @param pVCpu Pointer to the VMCPU.
5794 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5795 * out-of-sync. Make sure to update the required fields
5796 * before using them.
5797 * @param pVmxTransient Pointer to the VMX transient structure.
5798 *
5799 * @remarks No-long-jump zone!!!
5800 */
5801static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5802{
5803 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5804
5805 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5806 AssertRCReturn(rc, rc);
5807 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5808 AssertRCReturn(rc, rc);
5809
5810 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5811 {
5812 uint32_t uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5813 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5814
5815 typedef enum
5816 {
5817 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5818 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5819 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5820 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
5821 } VMXREFLECTXCPT;
5822
5823 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
5824 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
5825 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5826 {
5827 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
5828 {
5829 enmReflect = VMXREFLECTXCPT_XCPT;
5830#ifdef VBOX_STRICT
5831 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
5832 && uExitVector == X86_XCPT_PF)
5833 {
5834 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5835 }
5836#endif
5837 if ( uExitVector == X86_XCPT_PF
5838 && uIdtVector == X86_XCPT_PF)
5839 {
5840 pVmxTransient->fVectoringDoublePF = true;
5841 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5842 }
5843 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5844 && hmR0VmxIsContributoryXcpt(uExitVector)
5845 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
5846 || uIdtVector == X86_XCPT_PF))
5847 {
5848 enmReflect = VMXREFLECTXCPT_DF;
5849 }
5850 else if (uIdtVector == X86_XCPT_DF)
5851 enmReflect = VMXREFLECTXCPT_TF;
5852 }
5853 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5854 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5855 {
5856 /*
5857 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
5858 * privileged software exception (#DB from ICEBP) as they reoccur when restarting the instruction.
5859 */
5860 enmReflect = VMXREFLECTXCPT_XCPT;
5861
5862 if (uExitVector == X86_XCPT_PF)
5863 {
5864 pVmxTransient->fVectoringPF = true;
5865 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5866 }
5867 }
5868 }
5869 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5870 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5871 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5872 {
5873 /*
5874 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5875 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,
5876 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.
5877 */
5878 enmReflect = VMXREFLECTXCPT_XCPT;
5879 }
5880
5881 /*
5882 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred
5883 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before
5884 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.
5885 *
5886 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5887 */
5888 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5889 && enmReflect == VMXREFLECTXCPT_XCPT
5890 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
5891 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5892 {
5893 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5894 }
5895
5896 switch (enmReflect)
5897 {
5898 case VMXREFLECTXCPT_XCPT:
5899 {
5900 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5901 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5902 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5903
5904 uint32_t u32ErrCode = 0;
5905 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5906 {
5907 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5908 AssertRCReturn(rc, rc);
5909 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5910 }
5911
5912 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5913 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5914 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5915 rc = VINF_SUCCESS;
5916 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5917 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5918
5919 break;
5920 }
5921
5922 case VMXREFLECTXCPT_DF:
5923 {
5924 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5925 rc = VINF_HM_DOUBLE_FAULT;
5926 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5927 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5928
5929 break;
5930 }
5931
5932 case VMXREFLECTXCPT_TF:
5933 {
5934 rc = VINF_EM_RESET;
5935 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5936 uExitVector));
5937 break;
5938 }
5939
5940 default:
5941 Assert(rc == VINF_SUCCESS);
5942 break;
5943 }
5944 }
5945 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
5946 && VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
5947 && uExitVector != X86_XCPT_DF
5948 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5949 {
5950 /*
5951         * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
5952 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
5953 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
5954 */
5955 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5956 {
5957 Log4(("hmR0VmxCheckExitDueToEventDelivery: vcpu[%RU32] Setting VMCPU_FF_BLOCK_NMIS. Valid=%RTbool uExitReason=%u\n",
5958 pVCpu->idCpu, VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
5959 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5960 }
5961 }
5962
5963 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5964 return rc;
5965}
5966
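/*
 * Illustrative restatement, not part of the original source: the VMXREFLECTXCPT_DF
 * decision above boils down to "a contributory exception raised while delivering a
 * contributory exception or a #PF becomes a #DF". The sketch below ignores the
 * additional exception-bitmap check and the #PF-during-#PF case, which the code
 * above handles separately as a vectoring double #PF.
 */
#if 0 /* example only */
static bool hmR0VmxExampleIsDoubleFaultCombo(uint32_t uIdtVector, uint32_t uExitVector)
{
    return hmR0VmxIsContributoryXcpt(uExitVector)
        && (   hmR0VmxIsContributoryXcpt(uIdtVector)
            || uIdtVector == X86_XCPT_PF);
}
#endif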
5967
5968/**
5969 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5970 *
5971 * @returns VBox status code.
5972 * @param pVCpu Pointer to the VMCPU.
5973 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5974 * out-of-sync. Make sure to update the required fields
5975 * before using them.
5976 *
5977 * @remarks No-long-jump zone!!!
5978 */
5979static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5980{
5981 NOREF(pMixedCtx);
5982
5983 /*
5984 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
5985 * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
5986 */
5987 VMMRZCallRing3Disable(pVCpu);
5988 HM_DISABLE_PREEMPT();
5989
5990 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
5991 {
5992 uint32_t uVal = 0;
5993 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5994 AssertRCReturn(rc, rc);
5995
5996 uint32_t uShadow = 0;
5997 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5998 AssertRCReturn(rc, rc);
5999
6000 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
6001 CPUMSetGuestCR0(pVCpu, uVal);
6002 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
6003 }
6004
6005 HM_RESTORE_PREEMPT();
6006 VMMRZCallRing3Enable(pVCpu);
6007 return VINF_SUCCESS;
6008}
6009
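/*
 * Worked example, not part of the original source, of the mask/shadow merge above:
 * bits set in u32CR0Mask are host-owned, so their guest-visible value comes from the
 * CR0 read shadow, while all other bits are taken from the real VMCS guest CR0. The
 * mask and values below are purely hypothetical.
 */
#if 0 /* example only */
uint32_t const fExampleMask   = X86_CR0_NE | X86_CR0_PG;                 /* host-owned bits (hypothetical) */
uint32_t const uExampleVmcs   = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;    /* value VT-x is really running with */
uint32_t const uExampleShadow = X86_CR0_PE;                              /* what the guest last wrote */
uint32_t const uExampleGuest  = (uExampleShadow & fExampleMask) | (uExampleVmcs & ~fExampleMask); /* == X86_CR0_PE */
#endif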
6010
6011/**
6012 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
6013 *
6014 * @returns VBox status code.
6015 * @param pVCpu Pointer to the VMCPU.
6016 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6017 * out-of-sync. Make sure to update the required fields
6018 * before using them.
6019 *
6020 * @remarks No-long-jump zone!!!
6021 */
6022static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6023{
6024 NOREF(pMixedCtx);
6025
6026 int rc = VINF_SUCCESS;
6027 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
6028 {
6029 uint32_t uVal = 0;
6030 uint32_t uShadow = 0;
6031 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
6032 AssertRCReturn(rc, rc);
6033 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
6034 AssertRCReturn(rc, rc);
6035
6036 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
6037 CPUMSetGuestCR4(pVCpu, uVal);
6038 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
6039 }
6040 return rc;
6041}
6042
6043
6044/**
6045 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
6046 *
6047 * @returns VBox status code.
6048 * @param pVCpu Pointer to the VMCPU.
6049 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6050 * out-of-sync. Make sure to update the required fields
6051 * before using them.
6052 *
6053 * @remarks No-long-jump zone!!!
6054 */
6055static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6056{
6057 int rc = VINF_SUCCESS;
6058 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
6059 {
6060 uint64_t u64Val = 0;
6061 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
6062 AssertRCReturn(rc, rc);
6063
6064 pMixedCtx->rip = u64Val;
6065 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
6066 }
6067 return rc;
6068}
6069
6070
6071/**
6072 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
6073 *
6074 * @returns VBox status code.
6075 * @param pVCpu Pointer to the VMCPU.
6076 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6077 * out-of-sync. Make sure to update the required fields
6078 * before using them.
6079 *
6080 * @remarks No-long-jump zone!!!
6081 */
6082static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6083{
6084 int rc = VINF_SUCCESS;
6085 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
6086 {
6087 uint64_t u64Val = 0;
6088 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
6089 AssertRCReturn(rc, rc);
6090
6091 pMixedCtx->rsp = u64Val;
6092 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
6093 }
6094 return rc;
6095}
6096
6097
6098/**
6099 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
6100 *
6101 * @returns VBox status code.
6102 * @param pVCpu Pointer to the VMCPU.
6103 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6104 * out-of-sync. Make sure to update the required fields
6105 * before using them.
6106 *
6107 * @remarks No-long-jump zone!!!
6108 */
6109static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6110{
6111 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
6112 {
6113 uint32_t uVal = 0;
6114 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
6115 AssertRCReturn(rc, rc);
6116
6117 pMixedCtx->eflags.u32 = uVal;
6118 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
6119 {
6120 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
6121 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
6122
6123 pMixedCtx->eflags.Bits.u1VM = 0;
6124 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6125 }
6126
6127 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
6128 }
6129 return VINF_SUCCESS;
6130}
6131
6132
6133/**
6134 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
6135 * guest-CPU context.
6136 */
6137DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6138{
6139 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6140 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6141 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6142 return rc;
6143}
6144
6145
6146/**
6147 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
6148 * from the guest-state area in the VMCS.
6149 *
6150 * @param pVCpu Pointer to the VMCPU.
6151 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6152 * out-of-sync. Make sure to update the required fields
6153 * before using them.
6154 *
6155 * @remarks No-long-jump zone!!!
6156 */
6157static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6158{
6159 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
6160 {
6161 uint32_t uIntrState = 0;
6162 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
6163 AssertRC(rc);
6164
6165 if (!uIntrState)
6166 {
6167 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6168 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6169
6170 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6171 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6172 }
6173 else
6174 {
6175 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
6176 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
6177 {
6178 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6179 AssertRC(rc);
6180 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
6181 AssertRC(rc);
6182
6183 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
6184 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6185 }
6186 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6187 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6188
6189 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6190 {
6191 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6192 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6193 }
6194 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6195 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6196 }
6197
6198 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
6199 }
6200}
6201
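/*
 * Illustrative sketch, not part of the original source: how the interrupt shadow
 * recorded above is typically consumed before injecting an external interrupt.
 * EMGetInhibitInterruptsPC() is assumed here as the read counterpart of the
 * EMSetInhibitInterruptsPC() call used above.
 */
#if 0 /* example only */
static bool hmR0VmxExampleIsInterruptShadowActive(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    /* The shadow only applies while RIP is still at the instruction that set it (STI / MOV SS). */
    return VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
        && EMGetInhibitInterruptsPC(pVCpu) == pMixedCtx->rip;
}
#endif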
6202
6203/**
6204 * Saves the guest's activity state.
6205 *
6206 * @returns VBox status code.
6207 * @param pVCpu Pointer to the VMCPU.
6208 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6209 * out-of-sync. Make sure to update the required fields
6210 * before using them.
6211 *
6212 * @remarks No-long-jump zone!!!
6213 */
6214static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6215{
6216 NOREF(pMixedCtx);
6217 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
6218 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6219 return VINF_SUCCESS;
6220}
6221
6222
6223/**
6224 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
6225 * the current VMCS into the guest-CPU context.
6226 *
6227 * @returns VBox status code.
6228 * @param pVCpu Pointer to the VMCPU.
6229 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6230 * out-of-sync. Make sure to update the required fields
6231 * before using them.
6232 *
6233 * @remarks No-long-jump zone!!!
6234 */
6235static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6236{
6237 int rc = VINF_SUCCESS;
6238 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6239 {
6240 uint32_t u32Val = 0;
6241 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
6242 pMixedCtx->SysEnter.cs = u32Val;
6243 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6244 }
6245
6246 uint64_t u64Val = 0;
6247 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6248 {
6249 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
6250 pMixedCtx->SysEnter.eip = u64Val;
6251 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6252 }
6253 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6254 {
6255 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
6256 pMixedCtx->SysEnter.esp = u64Val;
6257 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6258 }
6259 return rc;
6260}
6261
6262
6263/**
6264 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
6265 * the CPU back into the guest-CPU context.
6266 *
6267 * @returns VBox status code.
6268 * @param pVCpu Pointer to the VMCPU.
6269 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6270 * out-of-sync. Make sure to update the required fields
6271 * before using them.
6272 *
6273 * @remarks No-long-jump zone!!!
6274 */
6275static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6276{
6277#if HC_ARCH_BITS == 64
6278 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6279 {
6280 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
6281 VMMRZCallRing3Disable(pVCpu);
6282 HM_DISABLE_PREEMPT();
6283
6284 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
6285 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6286 {
6287 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6288 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6289 }
6290
6291 HM_RESTORE_PREEMPT();
6292 VMMRZCallRing3Enable(pVCpu);
6293 }
6294 else
6295 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6296#else
6297 NOREF(pMixedCtx);
6298 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6299#endif
6300
6301 return VINF_SUCCESS;
6302}
6303
6304
6305/**
6306 * Saves the auto load/store'd guest MSRs from the current VMCS into
6307 * the guest-CPU context.
6308 *
6309 * @returns VBox status code.
6310 * @param pVCpu Pointer to the VMCPU.
6311 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6312 * out-of-sync. Make sure to update the required fields
6313 * before using them.
6314 *
6315 * @remarks No-long-jump zone!!!
6316 */
6317static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6318{
6319 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6320 return VINF_SUCCESS;
6321
6322 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6323 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6324 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
6325 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6326 {
6327 switch (pMsr->u32Msr)
6328 {
6329 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6330 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
6331 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
6332 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
6333 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6334 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
6335 break;
6336
6337 default:
6338 {
6339 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
6340 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6341 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6342 }
6343 }
6344 }
6345
6346 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6347 return VINF_SUCCESS;
6348}
6349
6350
6351/**
6352 * Saves the guest control registers from the current VMCS into the guest-CPU
6353 * context.
6354 *
6355 * @returns VBox status code.
6356 * @param pVCpu Pointer to the VMCPU.
6357 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6358 * out-of-sync. Make sure to update the required fields
6359 * before using them.
6360 *
6361 * @remarks No-long-jump zone!!!
6362 */
6363static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6364{
6365 /* Guest CR0. Guest FPU. */
6366 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6367 AssertRCReturn(rc, rc);
6368
6369 /* Guest CR4. */
6370 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6371 AssertRCReturn(rc, rc);
6372
6373 /* Guest CR2 - updated always during the world-switch or in #PF. */
6374 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
6375 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6376 {
6377 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6378 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6379
6380 PVM pVM = pVCpu->CTX_SUFF(pVM);
6381 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6382 || ( pVM->hm.s.fNestedPaging
6383 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
6384 {
6385 uint64_t u64Val = 0;
6386 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6387 if (pMixedCtx->cr3 != u64Val)
6388 {
6389 CPUMSetGuestCR3(pVCpu, u64Val);
6390 if (VMMRZCallRing3IsEnabled(pVCpu))
6391 {
6392 PGMUpdateCR3(pVCpu, u64Val);
6393 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6394 }
6395 else
6396 {
6397 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
6398 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6399 }
6400 }
6401
6402 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
6403 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
6404 {
6405 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
6406 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
6407 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
6408 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
6409
6410 if (VMMRZCallRing3IsEnabled(pVCpu))
6411 {
6412 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6413 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6414 }
6415 else
6416 {
6417 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
6418 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6419 }
6420 }
6421 }
6422
6423 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6424 }
6425
6426 /*
6427 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6428 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6429 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
6430 *
6431     * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
6432 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6433 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6434 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
6435 *
6436 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6437 */
6438 if (VMMRZCallRing3IsEnabled(pVCpu))
6439 {
6440 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6441 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6442
6443 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6444 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6445
6446 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6447 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6448 }
6449
6450 return rc;
6451}
6452
6453
6454/**
6455 * Reads a guest segment register from the current VMCS into the guest-CPU
6456 * context.
6457 *
6458 * @returns VBox status code.
6459 * @param pVCpu Pointer to the VMCPU.
6460 * @param idxSel Index of the selector in the VMCS.
6461 * @param idxLimit Index of the segment limit in the VMCS.
6462 * @param idxBase Index of the segment base in the VMCS.
6463 * @param idxAccess Index of the access rights of the segment in the VMCS.
6464 * @param pSelReg Pointer to the segment selector.
6465 *
6466 * @remarks No-long-jump zone!!!
6467 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
6468 * macro as that takes care of whether to read from the VMCS cache or
6469 * not.
6470 */
6471DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6472 PCPUMSELREG pSelReg)
6473{
6474 NOREF(pVCpu);
6475
6476 uint32_t u32Val = 0;
6477 int rc = VMXReadVmcs32(idxSel, &u32Val);
6478 AssertRCReturn(rc, rc);
6479 pSelReg->Sel = (uint16_t)u32Val;
6480 pSelReg->ValidSel = (uint16_t)u32Val;
6481 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6482
6483 rc = VMXReadVmcs32(idxLimit, &u32Val);
6484 AssertRCReturn(rc, rc);
6485 pSelReg->u32Limit = u32Val;
6486
6487 uint64_t u64Val = 0;
6488 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
6489 AssertRCReturn(rc, rc);
6490 pSelReg->u64Base = u64Val;
6491
6492 rc = VMXReadVmcs32(idxAccess, &u32Val);
6493 AssertRCReturn(rc, rc);
6494 pSelReg->Attr.u = u32Val;
6495
6496 /*
6497 * If VT-x marks the segment as unusable, most other bits remain undefined:
6498 * - For CS the L, D and G bits have meaning.
6499 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6500 * - For the remaining data segments no bits are defined.
6501 *
6502 * The present bit and the unusable bit have been observed to be set at the
6503 * same time (the selector was supposed to be invalid as we started executing
6504 * a V8086 interrupt in ring-0).
6505 *
6506 * What should be important for the rest of the VBox code, is that the P bit is
6507 * cleared. Some of the other VBox code recognizes the unusable bit, but
6508 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6509 * safe side here, we'll strip off P and other bits we don't care about. If
6510 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6511 *
6512 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6513 */
6514 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6515 {
6516 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
6517
6518 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6519 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6520 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6521
6522 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
6523#ifdef DEBUG_bird
6524 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
6525 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6526 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6527#endif
6528 }
6529 return VINF_SUCCESS;
6530}
6531
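/*
 * Concrete illustration, not part of the original source, of the unusable-segment
 * attribute stripping above: a stale data segment that VT-x reports as unusable but
 * still present loses X86DESCATTR_P (and the limit-high/AVL bits), so the rest of
 * VBox simply sees a not-present segment. The attribute value is hypothetical.
 */
#if 0 /* example only */
uint32_t const uExampleAttrFromVmcs = X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D
                                    | X86DESCATTR_DPL /* DPL=3 */ | X86DESCATTR_DT | 0x3 /* type: RW data, accessed */;
uint32_t const uExampleAttrStripped = uExampleAttrFromVmcs & (  X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D
                                                              | X86DESCATTR_G | X86DESCATTR_DPL | X86DESCATTR_TYPE
                                                              | X86DESCATTR_DT); /* P is masked off */
#endif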
6532
6533#ifdef VMX_USE_CACHED_VMCS_ACCESSES
6534# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6535 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6536 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6537#else
6538# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6539 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6540 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6541#endif
6542
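/*
 * For clarity (illustrative, not part of the original source): in the non-cached
 * configuration, VMXLOCAL_READ_SEG(CS, cs) expands to the call below, i.e. the
 * preprocessor simply pastes the selector name into the VMCS field macros.
 */
#if 0 /* example only: expansion as it appears inside hmR0VmxSaveGuestSegmentRegs() */
rc = hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
                           VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
#endif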
6543
6544/**
6545 * Saves the guest segment registers from the current VMCS into the guest-CPU
6546 * context.
6547 *
6548 * @returns VBox status code.
6549 * @param pVCpu Pointer to the VMCPU.
6550 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6551 * out-of-sync. Make sure to update the required fields
6552 * before using them.
6553 *
6554 * @remarks No-long-jump zone!!!
6555 */
6556static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6557{
6558 /* Guest segment registers. */
6559 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6560 {
6561 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
6562 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
6563 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
6564 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
6565 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
6566 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
6567 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
6568
6569 /* Restore segment attributes for real-on-v86 mode hack. */
6570 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6571 {
6572 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6573 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6574 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6575 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6576 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6577 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6578 }
6579 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6580 }
6581
6582 return VINF_SUCCESS;
6583}
6584
6585
6586/**
6587 * Saves the guest descriptor table registers and task register from the current
6588 * VMCS into the guest-CPU context.
6589 *
6590 * @returns VBox status code.
6591 * @param pVCpu Pointer to the VMCPU.
6592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6593 * out-of-sync. Make sure to update the required fields
6594 * before using them.
6595 *
6596 * @remarks No-long-jump zone!!!
6597 */
6598static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6599{
6600 int rc = VINF_SUCCESS;
6601
6602 /* Guest LDTR. */
6603 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6604 {
6605 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
6606 AssertRCReturn(rc, rc);
6607 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6608 }
6609
6610 /* Guest GDTR. */
6611 uint64_t u64Val = 0;
6612 uint32_t u32Val = 0;
6613 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6614 {
6615 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6616 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6617 pMixedCtx->gdtr.pGdt = u64Val;
6618 pMixedCtx->gdtr.cbGdt = u32Val;
6619 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6620 }
6621
6622 /* Guest IDTR. */
6623 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6624 {
6625 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6626 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6627 pMixedCtx->idtr.pIdt = u64Val;
6628 pMixedCtx->idtr.cbIdt = u32Val;
6629 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6630 }
6631
6632 /* Guest TR. */
6633 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6634 {
6635 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6636 AssertRCReturn(rc, rc);
6637
6638        /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
6639 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6640 {
6641 rc = VMXLOCAL_READ_SEG(TR, tr);
6642 AssertRCReturn(rc, rc);
6643 }
6644 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6645 }
6646 return rc;
6647}
6648
6649#undef VMXLOCAL_READ_SEG
6650
6651
6652/**
6653 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
6654 * context.
6655 *
6656 * @returns VBox status code.
6657 * @param pVCpu Pointer to the VMCPU.
6658 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6659 * out-of-sync. Make sure to update the required fields
6660 * before using them.
6661 *
6662 * @remarks No-long-jump zone!!!
6663 */
6664static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6665{
6666 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6667 {
6668 if (!pVCpu->hm.s.fUsingHyperDR7)
6669 {
6670 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6671 uint32_t u32Val;
6672 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
6673 pMixedCtx->dr[7] = u32Val;
6674 }
6675
6676 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6677 }
6678 return VINF_SUCCESS;
6679}
6680
6681
6682/**
6683 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
6684 *
6685 * @returns VBox status code.
6686 * @param pVCpu Pointer to the VMCPU.
6687 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6688 * out-of-sync. Make sure to update the required fields
6689 * before using them.
6690 *
6691 * @remarks No-long-jump zone!!!
6692 */
6693static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6694{
6695 NOREF(pMixedCtx);
6696
6697 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
6698 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6699 return VINF_SUCCESS;
6700}
6701
6702
6703/**
6704 * Saves the entire guest state from the currently active VMCS into the
6705 * guest-CPU context.
6706 *
6707 * This essentially VMREADs all guest-data.
6708 *
6709 * @returns VBox status code.
6710 * @param pVCpu Pointer to the VMCPU.
6711 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6712 * out-of-sync. Make sure to update the required fields
6713 * before using them.
6714 */
6715static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6716{
6717 Assert(pVCpu);
6718 Assert(pMixedCtx);
6719
6720 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6721 return VINF_SUCCESS;
6722
6723 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
6724       again on the ring-3 callback path, there is no real need to do so. */
6725 if (VMMRZCallRing3IsEnabled(pVCpu))
6726 VMMR0LogFlushDisable(pVCpu);
6727 else
6728 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6729 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6730
6731 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6732 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6733
6734 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6735 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6736
6737 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6738 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6739
6740 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6741 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6742
6743 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6744 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6745
6746 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6747 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6748
6749 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6750 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6751
6752 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6753 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6754
6755 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6756 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6757
6758 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6759 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6760
6761 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6762 ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
6763
6764 if (VMMRZCallRing3IsEnabled(pVCpu))
6765 VMMR0LogFlushEnable(pVCpu);
6766
6767 return VINF_SUCCESS;
6768}
6769
6770
6771/**
6772 * Saves basic guest registers needed for IEM instruction execution.
6773 *
6774 * @returns VBox status code (OR-able).
6775 * @param pVCpu Pointer to the cross context CPU data for the calling
6776 * EMT.
6777 * @param pMixedCtx Pointer to the CPU context of the guest.
6778 * @param fMemory Whether the instruction being executed operates on
6779 * memory or not. Only CR0 is synced up if clear.
6780 * @param fNeedRsp Need RSP (any instruction working on GPRs or stack).
6781 */
6782static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
6783{
6784 /*
6785 * We assume all general purpose registers other than RSP are available.
6786 *
6787 * RIP is a must as it will be incremented or otherwise changed.
6788 *
6789 * RFLAGS are always required to figure the CPL.
6790 *
6791 * RSP isn't always required, however it's a GPR so frequently required.
6792 *
6793     * SS and CS are the only segment registers needed if IEM doesn't do memory
6794 * access (CPL + 16/32/64-bit mode), but we can only get all segment registers.
6795 *
6796 * CR0 is always required by IEM for the CPL, while CR3 and CR4 will only
6797 * be required for memory accesses.
6798 *
6799 * Note! Before IEM dispatches an exception, it will call us to sync in everything.
6800 */
6801 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6802 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6803 if (fNeedRsp)
6804 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6805 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6806 if (!fMemory)
6807 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6808 else
6809 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6810 return rc;
6811}
6812
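/*
 * Typical usage sketch, not part of the original source: a VM-exit handler that
 * wants the instruction interpreted syncs the minimal state first and then hands
 * over to the interpreter; IEMExecOne() is assumed here as the IEM entry point.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmR0VmxExampleInterpretViaIem(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    /* Register-only instruction: no memory access, RSP not needed. */
    int rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /* fMemory */, false /* fNeedRsp */);
    AssertRCReturn(rc, rc);
    return IEMExecOne(pVCpu);
}
#endif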
6813
6814/**
6815 * Ensures that we've got a complete basic context.
6816 *
6817 * This excludes the FPU, SSE, AVX, and similar extended state. The interface
6818 * is for the interpreter.
6819 *
6820 * @returns VBox status code.
6821 * @param pVCpu Pointer to the VMCPU of the calling EMT.
6822 * @param pMixedCtx Pointer to the guest-CPU context which may have data
6823 * needing to be synced in.
6824 * @thread EMT(pVCpu)
6825 */
6826VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6827{
6828 /* Note! Since this is only applicable to VT-x, the implementation is placed
6829 in the VT-x part of the sources instead of the generic stuff. */
6830 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
6831 return hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6832 return VINF_SUCCESS;
6833}
6834
6835
6836/**
6837 * Checks per-VM and per-VCPU force flag actions that require us to go back to
6838 * ring-3 for one reason or another.
6839 *
6840 * @returns VBox status code (information status code included).
6841 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6842 * ring-3.
6843 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6844 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6845 * interrupts).
6846 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6847 * all EMTs to be in ring-3.
6848 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6849 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
6850 * to the EM loop.
6851 *
6852 * @param pVM Pointer to the VM.
6853 * @param pVCpu Pointer to the VMCPU.
6854 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6855 * out-of-sync. Make sure to update the required fields
6856 * before using them.
6857 */
6858static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6859{
6860 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6861
6862 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
6863 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
6864 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
6865 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6866 {
6867 /* We need the control registers now, make sure the guest-CPU context is updated. */
6868 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6869 AssertRCReturn(rc3, rc3);
6870
6871 /* Pending HM CR3 sync. */
6872 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6873 {
6874 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6875 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
6876 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
6877 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6878 }
6879
6880 /* Pending HM PAE PDPEs. */
6881 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6882 {
6883 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6884 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6885 }
6886
6887        /* Pending PGM CR3 sync. */
6888 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6889 {
6890 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6891 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6892 if (rc2 != VINF_SUCCESS)
6893 {
6894 AssertRC(rc2);
6895 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
6896 return rc2;
6897 }
6898 }
6899
6900 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6901 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6902 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6903 {
6904 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6905 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6906 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6907 return rc2;
6908 }
6909
6910 /* Pending VM request packets, such as hardware interrupts. */
6911 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6912 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6913 {
6914 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
6915 return VINF_EM_PENDING_REQUEST;
6916 }
6917
6918 /* Pending PGM pool flushes. */
6919 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6920 {
6921 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
6922 return VINF_PGM_POOL_FLUSH_PENDING;
6923 }
6924
6925 /* Pending DMA requests. */
6926 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6927 {
6928 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
6929 return VINF_EM_RAW_TO_R3;
6930 }
6931 }
6932
6933 return VINF_SUCCESS;
6934}
6935
6936
6937/**
6938 * Converts any TRPM trap into a pending HM event. This is typically used when
6939 * entering from ring-3 (not longjmp returns).
6940 *
6941 * @param pVCpu Pointer to the VMCPU.
6942 */
6943static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6944{
6945 Assert(TRPMHasTrap(pVCpu));
6946 Assert(!pVCpu->hm.s.Event.fPending);
6947
6948 uint8_t uVector;
6949 TRPMEVENT enmTrpmEvent;
6950 RTGCUINT uErrCode;
6951 RTGCUINTPTR GCPtrFaultAddress;
6952 uint8_t cbInstr;
6953
6954 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6955 AssertRC(rc);
6956
6957    /* See Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6958 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6959 if (enmTrpmEvent == TRPM_TRAP)
6960 {
6961 switch (uVector)
6962 {
6963 case X86_XCPT_NMI:
6964 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6965 break;
6966
6967 case X86_XCPT_BP:
6968 case X86_XCPT_OF:
6969 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6970 break;
6971
6972 case X86_XCPT_PF:
6973 case X86_XCPT_DF:
6974 case X86_XCPT_TS:
6975 case X86_XCPT_NP:
6976 case X86_XCPT_SS:
6977 case X86_XCPT_GP:
6978 case X86_XCPT_AC:
6979 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6980 /* no break! */
6981 default:
6982 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6983 break;
6984 }
6985 }
6986 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6987 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6988 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6989 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6990 else
6991 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6992
6993 rc = TRPMResetTrap(pVCpu);
6994 AssertRC(rc);
6995 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6996 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6997
6998 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6999 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
7000}
7001
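/*
 * Worked example, not part of the original source: for a TRPM_TRAP #GP the switch
 * above produces an interruption-information word with the vector in bits 0-7, the
 * type set to hardware exception, and both the error-code-valid and valid bits set.
 */
#if 0 /* example only */
uint32_t const uExampleIntInfoGP = X86_XCPT_GP                                        /* vector 13 */
                                 | VMX_EXIT_INTERRUPTION_INFO_VALID
                                 | VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID        /* #GP pushes an error code */
                                 | (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
#endif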
7002
7003/**
7004 * Converts any pending HM event into a TRPM trap. Typically used when leaving
7005 * VT-x to execute any instruction.
7006 *
7007 * @param pVCpu Pointer to the VMCPU.
7008 */
7009static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
7010{
7011 Assert(pVCpu->hm.s.Event.fPending);
7012
7013 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7014 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
7015 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
7016 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
7017
7018 /* If a trap was already pending, we did something wrong! */
7019 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
7020
7021 TRPMEVENT enmTrapType;
7022 switch (uVectorType)
7023 {
7024 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
7025 enmTrapType = TRPM_HARDWARE_INT;
7026 break;
7027
7028 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
7029 enmTrapType = TRPM_SOFTWARE_INT;
7030 break;
7031
7032 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
7033 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
7034 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
7035 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
7036 enmTrapType = TRPM_TRAP;
7037 break;
7038
7039 default:
7040 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
7041 enmTrapType = TRPM_32BIT_HACK;
7042 break;
7043 }
7044
7045 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
7046
7047 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
7048 AssertRC(rc);
7049
7050 if (fErrorCodeValid)
7051 TRPMSetErrorCode(pVCpu, uErrorCode);
7052
7053 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
7054 && uVector == X86_XCPT_PF)
7055 {
7056 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
7057 }
7058 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
7059 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
7060 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
7061 {
7062 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
7063 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
7064 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
7065 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
7066 }
7067 pVCpu->hm.s.Event.fPending = false;
7068}
7069
7070
7071/**
7072 * Does the necessary state syncing before returning to ring-3 for any reason
7073 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
7074 *
7075 * @returns VBox status code.
7076 * @param pVM Pointer to the VM.
7077 * @param pVCpu Pointer to the VMCPU.
7078 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7079 * be out-of-sync. Make sure to update the required
7080 * fields before using them.
7081 * @param fSaveGuestState Whether to save the guest state or not.
7082 *
7083 * @remarks No-long-jmp zone!!!
7084 */
7085static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
7086{
7087 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7088 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7089
7090 RTCPUID idCpu = RTMpCpuId();
7091 Log4Func(("HostCpuId=%u\n", idCpu));
7092
7093 /*
7094 * !!! IMPORTANT !!!
7095 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7096 */
7097
7098 /* Save the guest state if necessary. */
7099 if ( fSaveGuestState
7100 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
7101 {
7102 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7103 AssertRCReturn(rc, rc);
7104 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7105 }
7106
7107    /* Restore host FPU state if necessary and resync on next R0 reentry. */
7108 if (CPUMIsGuestFPUStateActive(pVCpu))
7109 {
7110 /* We shouldn't reload CR0 without saving it first. */
7111 if (!fSaveGuestState)
7112 {
7113 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7114 AssertRCReturn(rc, rc);
7115 }
7116 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
7117 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
7118 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
7119 }
7120
7121 /* Restore host debug registers if necessary and resync on next R0 reentry. */
7122#ifdef VBOX_STRICT
7123 if (CPUMIsHyperDebugStateActive(pVCpu))
7124 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
7125#endif
7126 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
7127 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
7128 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
7129 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
7130
7131#if HC_ARCH_BITS == 64
7132 /* Restore host-state bits that VT-x only restores partially. */
7133 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7134 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7135 {
7136 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
7137 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7138 }
7139 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7140#endif
7141
7142#if HC_ARCH_BITS == 64
7143 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7144 if ( pVM->hm.s.fAllow64BitGuests
7145 && pVCpu->hm.s.vmx.fLazyMsrs)
7146 {
7147 /* We shouldn't reload the guest MSRs without saving it first. */
7148 if (!fSaveGuestState)
7149 {
7150 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
7151 AssertRCReturn(rc, rc);
7152 }
7153 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
7154 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7155 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
7156 }
7157#endif
7158
7159 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7160 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7161
7162 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
7163 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
7164 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
7165 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
7166 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
7167 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
7168 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
7169 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7170
7171 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7172
7173 /** @todo This partially defeats the purpose of having preemption hooks.
7174 * The problem is, deregistering the hooks should be moved to a place that
7175     * lasts until the EMT is about to be destroyed, not done every time we
7176     * leave HM context.
7177 */
7178 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7179 {
7180 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7181 AssertRCReturn(rc, rc);
7182
7183 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7184 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7185 }
7186 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7187 NOREF(idCpu);
7188
7189 return VINF_SUCCESS;
7190}
7191
7192
7193/**
7194 * Leaves the VT-x session.
7195 *
7196 * @returns VBox status code.
7197 * @param pVM Pointer to the VM.
7198 * @param pVCpu Pointer to the VMCPU.
7199 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7200 * out-of-sync. Make sure to update the required fields
7201 * before using them.
7202 *
7203 * @remarks No-long-jmp zone!!!
7204 */
7205DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7206{
7207 HM_DISABLE_PREEMPT();
7208 HMVMX_ASSERT_CPU_SAFE();
7209 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7210 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7211
7212 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
7213 and done this from the VMXR0ThreadCtxCallback(). */
7214 if (!pVCpu->hm.s.fLeaveDone)
7215 {
7216 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7217 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
7218 pVCpu->hm.s.fLeaveDone = true;
7219 }
7220 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7221
7222 /*
7223 * !!! IMPORTANT !!!
7224 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7225 */
7226
7227 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7228 /** @todo Deregistering here means we need to VMCLEAR always
7229 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
7230 VMMR0ThreadCtxHooksDeregister(pVCpu);
7231
7232 /* Leave HM context. This takes care of local init (term). */
7233 int rc = HMR0LeaveCpu(pVCpu);
7234
7235 HM_RESTORE_PREEMPT();
7236 return rc;
7237}
7238
7239
7240/**
7241 * Does the necessary state syncing before doing a longjmp to ring-3.
7242 *
7243 * @returns VBox status code.
7244 * @param pVM Pointer to the VM.
7245 * @param pVCpu Pointer to the VMCPU.
7246 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7247 * out-of-sync. Make sure to update the required fields
7248 * before using them.
7249 *
7250 * @remarks No-long-jmp zone!!!
7251 */
7252DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7253{
7254 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7255}
7256
7257
7258/**
7259 * Takes the necessary actions before going back to ring-3.
7260 *
7261 * An action requires us to go back to ring-3. This function does the necessary
7262 * steps before we can safely return to ring-3. This is not the same as longjmps
7263 * to ring-3; this is voluntary and prepares the guest so it may continue
7264 * executing outside HM (recompiler/IEM).
7265 *
7266 * @returns VBox status code.
7267 * @param pVM Pointer to the VM.
7268 * @param pVCpu Pointer to the VMCPU.
7269 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7270 * out-of-sync. Make sure to update the required fields
7271 * before using them.
7272 * @param rcExit The reason for exiting to ring-3. Can be
7273 * VINF_VMM_UNKNOWN_RING3_CALL.
7274 */
7275static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
7276{
7277 Assert(pVM);
7278 Assert(pVCpu);
7279 Assert(pMixedCtx);
7280 HMVMX_ASSERT_PREEMPT_SAFE();
7281
7282 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7283 {
7284 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7285 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7286 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7287 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7288 }
7289
7290    /* Please, no longjumps here (any log flush shouldn't jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7291 VMMRZCallRing3Disable(pVCpu);
7292 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
7293
7294 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7295 if (pVCpu->hm.s.Event.fPending)
7296 {
7297 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7298 Assert(!pVCpu->hm.s.Event.fPending);
7299 }
7300
7301 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
7302 and if we're injecting an event we should have a TRPM trap pending. */
7303 Assert(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu));
7304 Assert(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu));
7305
7306 /* Save guest state and restore host state bits. */
7307 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7308 AssertRCReturn(rc, rc);
7309 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7310 /* Thread-context hooks are unregistered at this point!!! */
7311
7312 /* Sync recompiler state. */
7313 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7314 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7315 | CPUM_CHANGED_LDTR
7316 | CPUM_CHANGED_GDTR
7317 | CPUM_CHANGED_IDTR
7318 | CPUM_CHANGED_TR
7319 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7320 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7321 if ( pVM->hm.s.fNestedPaging
7322 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7323 {
7324 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7325 }
7326
7327 Assert(!pVCpu->hm.s.fClearTrapFlag);
7328
7329 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7330 if (rcExit != VINF_EM_RAW_INTERRUPT)
7331 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7332
7333 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7334
7335 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7336 VMMRZCallRing3RemoveNotification(pVCpu);
7337 VMMRZCallRing3Enable(pVCpu);
7338
7339 return rc;
7340}
7341
7342
7343/**
7344 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7345 * longjump to ring-3 and possibly get preempted.
7346 *
7347 * @returns VBox status code.
7348 * @param pVCpu Pointer to the VMCPU.
7349 * @param enmOperation The operation causing the ring-3 longjump.
7350 * @param pvUser Opaque pointer to the guest-CPU context. The data
7351 * may be out-of-sync. Make sure to update the required
7352 * fields before using them.
7353 */
7354DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7355{
7356 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7357 {
7358 /*
7359 * !!! IMPORTANT !!!
7360 * If you modify code here, make sure to check whether hmR0VmxLeave() and hmR0VmxLeaveSession() need
7361 * to be updated too. This is a stripped-down version which gets out ASAP, trying not to trigger any assertions.
7362 */
7363 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
7364 VMMRZCallRing3RemoveNotification(pVCpu);
7365 VMMRZCallRing3Disable(pVCpu);
7366 RTThreadPreemptDisable(&PreemptState);
7367
7368 PVM pVM = pVCpu->CTX_SUFF(pVM);
7369 if (CPUMIsGuestFPUStateActive(pVCpu))
7370 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7371
7372 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7373
7374#if HC_ARCH_BITS == 64
7375 /* Restore host-state bits that VT-x only restores partially. */
7376 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7377 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7378 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7379 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7380
7381 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7382 if ( pVM->hm.s.fAllow64BitGuests
7383 && pVCpu->hm.s.vmx.fLazyMsrs)
7384 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7385#endif
7386 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7387 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7388 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7389 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7390 {
7391 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7392 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7393 }
7394
7395 VMMR0ThreadCtxHooksDeregister(pVCpu);
7396 HMR0LeaveCpu(pVCpu);
7397 RTThreadPreemptRestore(&PreemptState);
7398 return VINF_SUCCESS;
7399 }
7400
7401 Assert(pVCpu);
7402 Assert(pvUser);
7403 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7404 HMVMX_ASSERT_PREEMPT_SAFE();
7405
7406 VMMRZCallRing3Disable(pVCpu);
7407 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7408
7409 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
7410 enmOperation));
7411
7412 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7413 AssertRCReturn(rc, rc);
7414
7415 VMMRZCallRing3Enable(pVCpu);
7416 return VINF_SUCCESS;
7417}
7418
7419
7420/**
7421 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7422 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7423 *
7424 * @param pVCpu Pointer to the VMCPU.
7425 */
7426DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7427{
7428 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7429 {
7430 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7431 {
7432 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7433 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7434 AssertRC(rc);
7435 Log4(("Setup interrupt-window exiting\n"));
7436 }
7437 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7438}
7439
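/*
 * Illustrative sketch (not part of the build): the pattern the window-exiting
 * helpers above and below follow.  A processor-based execution control bit may
 * only be set if its "allowed-1" bit from the VMX capability MSRs is set, and
 * the cached copy of the control field is kept in sync with the VMCS.  The
 * names below are stand-ins, not the real VBox macros or VMCS accessors.
 *
 *   static void exampleSetProcCtlBit(uint32_t *puCachedProcCtls, uint32_t fAllowed1, uint32_t fBit)
 *   {
 *       if (   (fAllowed1 & fBit)            // the CPU supports this control
 *           && !(*puCachedProcCtls & fBit))  // and it isn't set yet
 *       {
 *           *puCachedProcCtls |= fBit;
 *           // ...write the updated 32-bit value back to the VMCS control field here...
 *       }
 *   }
 */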
7440
7441/**
7442 * Clears the interrupt-window exiting control in the VMCS.
7443 *
7444 * @param pVCpu Pointer to the VMCPU.
7445 */
7446DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7447{
7448 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7449 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7450 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7451 AssertRC(rc);
7452 Log4(("Cleared interrupt-window exiting\n"));
7453}
7454
7455
7456/**
7457 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7458 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7459 *
7460 * @param pVCpu Pointer to the VMCPU.
7461 */
7462DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7463{
7464 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7465 {
7466 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7467 {
7468 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7469 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7470 AssertRC(rc);
7471 Log4(("Setup NMI-window exiting\n"));
7472 }
7473 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7474}
7475
7476
7477/**
7478 * Clears the NMI-window exiting control in the VMCS.
7479 *
7480 * @param pVCpu Pointer to the VMCPU.
7481 */
7482DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7483{
7484 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7485 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7486 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7487 AssertRC(rc);
7488 Log4(("Cleared NMI-window exiting\n"));
7489}
7490
7491
7492/**
7493 * Evaluates the event to be delivered to the guest and sets it as the pending
7494 * event.
7495 *
7496 * @param pVCpu Pointer to the VMCPU.
7497 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7498 * out-of-sync. Make sure to update the required fields
7499 * before using them.
7500 */
7501static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7502{
7503 Assert(!pVCpu->hm.s.Event.fPending);
7504
7505 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7506 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7507 bool const fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7508 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7509 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7510
7511 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7512 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
7513 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7514 Assert(!TRPMHasTrap(pVCpu));
7515
7516 /*
7517 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7518 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7519 */
7520 /** @todo SMI. SMIs take priority over NMIs. */
7521 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7522 {
7523 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7524 if ( !fBlockNmi
7525 && !fBlockSti
7526 && !fBlockMovSS)
7527 {
7528 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
7529 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7530 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7531
7532 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7533 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7534 }
7535 else
7536 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7537 }
7538 /*
7539 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
7540 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt.
7541 */
7542 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7543 && !pVCpu->hm.s.fSingleInstruction)
7544 {
7545 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7546 AssertRC(rc);
7547 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7548 if ( !fBlockInt
7549 && !fBlockSti
7550 && !fBlockMovSS)
7551 {
7552 uint8_t u8Interrupt;
7553 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7554 if (RT_SUCCESS(rc))
7555 {
7556 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7557 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
7558 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7559
7560 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7561 }
7562 else
7563 {
7564 /** @todo Does this actually happen? If not turn it into an assertion. */
7565 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7566 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7567 }
7568 }
7569 else
7570 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7571 }
7572}
7573
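/*
 * Illustrative sketch (not part of the build): the gating rules applied by
 * hmR0VmxEvaluatePendingEvent() above, reduced to plain booleans.  Block-by-STI
 * and block-by-MOV SS inhibit both NMIs and external interrupts; block-by-NMI
 * only inhibits NMIs; external interrupts additionally require EFLAGS.IF.  The
 * helper is a stand-in for illustration only.
 *
 *   static bool exampleCanInjectNow(bool fIsNmi, bool fEflagsIf,
 *                                   bool fBlockNmi, bool fBlockSti, bool fBlockMovSS)
 *   {
 *       if (fBlockSti || fBlockMovSS)
 *           return false;              // interrupt shadow blocks both kinds of events
 *       if (fIsNmi)
 *           return !fBlockNmi;         // NMIs ignore EFLAGS.IF
 *       return fEflagsIf;              // external interrupts need IF=1
 *   }
 */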
7574
7575/**
7576 * Sets a pending-debug exception to be delivered to the guest if the guest is
7577 * single-stepping.
7578 *
7579 * @param pVCpu Pointer to the VMCPU.
7580 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7581 * out-of-sync. Make sure to update the required fields
7582 * before using them.
7583 */
7584DECLINLINE(void) hmR0VmxSetPendingDebugXcpt(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7585{
7586 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7587 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
7588 {
7589 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7590 AssertRC(rc);
7591 }
7592}
7593
7594
7595/**
7596 * Injects any pending events into the guest if the guest is in a state to
7597 * receive them.
7598 *
7599 * @returns VBox status code (informational status codes included).
7600 * @param pVCpu Pointer to the VMCPU.
7601 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7602 * out-of-sync. Make sure to update the required fields
7603 * before using them.
7604 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should
7605 * return VINF_EM_DBG_STEPPED if the event was
7606 * dispatched directly.
7607 */
7608static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
7609{
7610 HMVMX_ASSERT_PREEMPT_SAFE();
7611 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7612
7613 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7614 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7615 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7616 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7617
7618 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7619 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
7620 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7621 Assert(!TRPMHasTrap(pVCpu));
7622
7623 int rc = VINF_SUCCESS;
7624 if (pVCpu->hm.s.Event.fPending)
7625 {
7626 /*
7627 * Clear any interrupt-window exiting control if we're going to inject an interrupt. Saves one extra
7628 * VM-exit in situations where we previously setup interrupt-window exiting but got other VM-exits and
7629 * ended up enabling interrupts outside VT-x.
7630 */
7631 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7632 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7633 && uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7634 {
7635 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7636 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7637 }
7638
7639#ifdef VBOX_STRICT
7640 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7641 {
7642 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7643 Assert(!fBlockInt);
7644 Assert(!fBlockSti);
7645 Assert(!fBlockMovSS);
7646 }
7647 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7648 {
7649 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7650 Assert(!fBlockSti);
7651 Assert(!fBlockMovSS);
7652 Assert(!fBlockNmi);
7653 }
7654#endif
7655 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7656 (uint8_t)uIntType));
7657 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7658 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
7659 AssertRCReturn(rc, rc);
7660
7661 /* Update the interruptibility-state as it could have been changed by
7662 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7663 fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7664 fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7665
7666#ifdef VBOX_WITH_STATISTICS
7667 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7668 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7669 else
7670 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7671#endif
7672 }
7673
7674 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7675 if ( fBlockSti
7676 || fBlockMovSS)
7677 {
7678 if ( !pVCpu->hm.s.fSingleInstruction
7679 && !DBGFIsStepping(pVCpu))
7680 {
7681 /*
7682 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7683 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7684 * See Intel spec. 27.3.4 "Saving Non-Register State".
7685 */
7686 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7687 AssertRCReturn(rc2, rc2);
7688 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
7689 }
7690 else if (pMixedCtx->eflags.Bits.u1TF)
7691 {
7692 /*
7693 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7694 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7695 */
7696 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7697 uIntrState = 0;
7698 }
7699 }
7700
7701 /*
7702 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7703 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7704 */
7705 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7706 AssertRC(rc2);
7707
7708 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
7709 NOREF(fBlockMovSS); NOREF(fBlockSti);
7710 return rc;
7711}
7712
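/*
 * Illustrative sketch (not part of the build): the single-stepping fix-up done
 * at the end of hmR0VmxInjectPendingEvent() above, using stand-in booleans
 * rather than the real VBox state.  With an interrupt shadow in effect, a guest
 * that is single-stepping (EFLAGS.TF) gets a pending #DB (BS bit) queued, while
 * stepping from the hypervisor debugger instead drops the inhibition so that
 * VM-entry does not deliver a spurious #DB.
 *
 *   static uint32_t exampleFixupIntrState(bool fHostStepping, bool fGuestTf, bool fBlockStiOrMovSS,
 *                                         uint32_t uIntrState, bool *pfPendDbgBS)
 *   {
 *       *pfPendDbgBS = false;
 *       if (fBlockStiOrMovSS)
 *       {
 *           if (!fHostStepping)
 *               *pfPendDbgBS = fGuestTf;   // guest TF over STI/MOV SS -> pend #DB with BS set
 *           else if (fGuestTf)
 *               uIntrState = 0;            // host stepping with TF -> clear the inhibition instead
 *       }
 *       return uIntrState;
 *   }
 */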
7713
7714/**
7715 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
7716 *
7717 * @param pVCpu Pointer to the VMCPU.
7718 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7719 * out-of-sync. Make sure to update the required fields
7720 * before using them.
7721 */
7722DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7723{
7724 NOREF(pMixedCtx);
7725 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7726 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7727}
7728
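/*
 * Illustrative sketch (not part of the build): the VM-entry interruption-
 * information layout that the SetPending and Inject helpers in this file build
 * up, per Intel spec. 24.8.3 "VM-Entry Controls for Event Injection": vector in
 * bits 7:0, type in bits 10:8, deliver-error-code in bit 11, valid in bit 31.
 * The function below spells the bit positions out numerically instead of using
 * the VBox macro names.
 *
 *   static uint32_t exampleMakeIntInfo(uint8_t uVector, uint8_t uType, bool fDeliverErrCode)
 *   {
 *       uint32_t u32IntInfo = uVector;                 // bits 7:0  - vector
 *       u32IntInfo |= (uint32_t)(uType & 0x7) << 8;    // bits 10:8 - type (0=ext int, 2=NMI, 3=hw xcpt, 4=sw int, ...)
 *       if (fDeliverErrCode)
 *           u32IntInfo |= RT_BIT(11);                  // bit 11    - deliver error code on entry
 *       u32IntInfo |= RT_BIT(31);                      // bit 31    - valid
 *       return u32IntInfo;
 *   }
 */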
7729
7730/**
7731 * Injects a double-fault (#DF) exception into the VM.
7732 *
7733 * @returns VBox status code (informational status code included).
7734 * @param pVCpu Pointer to the VMCPU.
7735 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7736 * out-of-sync. Make sure to update the required fields
7737 * before using them.
7738 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7739 * and should return VINF_EM_DBG_STEPPED if the event
7740 * is injected directly (register modified by us, not
7741 * by hardware on VM-entry).
7742 * @param puIntrState Pointer to the current guest interruptibility-state.
7743 * This interruptibility-state will be updated if
7744 * necessary. This cannot be NULL.
7745 */
7746DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
7747{
7748 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7749 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7750 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7751 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7752 fStepping, puIntrState);
7753}
7754
7755
7756/**
7757 * Sets a debug (#DB) exception as pending-for-injection into the VM.
7758 *
7759 * @param pVCpu Pointer to the VMCPU.
7760 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7761 * out-of-sync. Make sure to update the required fields
7762 * before using them.
7763 */
7764DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7765{
7766 NOREF(pMixedCtx);
7767 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7768 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7769 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7770}
7771
7772
7773/**
7774 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
7775 *
7776 * @param pVCpu Pointer to the VMCPU.
7777 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7778 * out-of-sync. Make sure to update the required fields
7779 * before using them.
7780 * @param cbInstr The instruction length in bytes; the return RIP pushed on the
7781 * guest stack is advanced by this amount.
7782 */
7783DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7784{
7785 NOREF(pMixedCtx);
7786 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7787 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7788 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7789}
7790
7791
7792/**
7793 * Injects a general-protection (#GP) fault into the VM.
7794 *
7795 * @returns VBox status code (informational status code included).
7796 * @param pVCpu Pointer to the VMCPU.
7797 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7798 * out-of-sync. Make sure to update the required fields
7799 * before using them.
7800 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7801 * mode, i.e. in real-mode it's not valid).
7802 * @param u32ErrorCode The error code associated with the #GP.
7803 * @param fStepping Whether we're running in
7804 * hmR0VmxRunGuestCodeStep() and should return
7805 * VINF_EM_DBG_STEPPED if the event is injected
7806 * directly (register modified by us, not by
7807 * hardware on VM-entry).
7808 * @param puIntrState Pointer to the current guest interruptibility-state.
7809 * This interruptibility-state will be updated if
7810 * necessary. This cannot be NULL.
7811 */
7812DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7813 bool fStepping, uint32_t *puIntrState)
7814{
7815 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7816 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7817 if (fErrorCodeValid)
7818 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7819 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7820 fStepping, puIntrState);
7821}
7822
7823
7824/**
7825 * Sets a general-protection (#GP) exception as pending-for-injection into the
7826 * VM.
7827 *
7828 * @param pVCpu Pointer to the VMCPU.
7829 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7830 * out-of-sync. Make sure to update the required fields
7831 * before using them.
7832 * @param u32ErrorCode The error code associated with the #GP.
7833 */
7834DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7835{
7836 NOREF(pMixedCtx);
7837 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7838 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7839 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7840 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7841}
7842
7843
7844/**
7845 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7846 *
7847 * @param pVCpu Pointer to the VMCPU.
7848 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7849 * out-of-sync. Make sure to update the required fields
7850 * before using them.
7851 * @param uVector The software interrupt vector number.
7852 * @param cbInstr The instruction length in bytes; the return RIP pushed on the
7853 * guest stack is advanced by this amount.
7854 */
7855DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7856{
7857 NOREF(pMixedCtx);
7858 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
7859 if ( uVector == X86_XCPT_BP
7860 || uVector == X86_XCPT_OF)
7861 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7862 else
7863 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7864 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7865}
7866
7867
7868/**
7869 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7870 * stack.
7871 *
7872 * @returns VBox status code (informational status code included).
7873 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7874 * @param pVM Pointer to the VM.
7875 * @param pMixedCtx Pointer to the guest-CPU context.
7876 * @param uValue The value to push to the guest stack.
7877 */
7878DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7879{
7880 /*
7881 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7882 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7883 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7884 */
7885 if (pMixedCtx->sp == 1)
7886 return VINF_EM_RESET;
7887 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7888 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7889 AssertRCReturn(rc, rc);
7890 return rc;
7891}
7892
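/*
 * Illustrative sketch (not part of the build): the real-mode PUSH arithmetic
 * used by hmR0VmxRealModeGuestStackPush() above.  SP is a 16-bit quantity that
 * wraps modulo 64K and the linear address of the store is simply SS.base + SP;
 * SP == 1 cannot hold a full 16-bit value without a partial wrap, which the
 * function above turns into a triple fault (VINF_EM_RESET).  Stand-in helper,
 * not the VBox code:
 *
 *   static uint64_t examplePush16Addr(uint64_t uSsBase, uint16_t *puSp)
 *   {
 *       *puSp = (uint16_t)(*puSp - sizeof(uint16_t));  // wraps from 0 to 0xfffe as expected
 *       return uSsBase + *puSp;                        // linear address to write the 16-bit value to
 *   }
 */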
7893
7894/**
7895 * Injects an event into the guest upon VM-entry by updating the relevant fields
7896 * in the VM-entry area in the VMCS.
7897 *
7898 * @returns VBox status code (informational error codes included).
7899 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7900 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7901 *
7902 * @param pVCpu Pointer to the VMCPU.
7903 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7904 * be out-of-sync. Make sure to update the required
7905 * fields before using them.
7906 * @param u64IntInfo The VM-entry interruption-information field.
7907 * @param cbInstr The VM-entry instruction length in bytes (for
7908 * software interrupts, exceptions and privileged
7909 * software exceptions).
7910 * @param u32ErrCode The VM-entry exception error code.
7911 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
7912 * @param puIntrState Pointer to the current guest interruptibility-state.
7913 * This interruptibility-state will be updated if
7914 * necessary. This cannot be NULL.
7915 * @param fStepping Whether we're running in
7916 * hmR0VmxRunGuestCodeStep() and should return
7917 * VINF_EM_DBG_STEPPED if the event is injected
7918 * directly (register modified by us, not by
7919 * hardware on VM-entry).
7920 *
7921 * @remarks Requires CR0!
7922 * @remarks No-long-jump zone!!!
7923 */
7924static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7925 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *puIntrState)
7926{
7927 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7928 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
7929 Assert(puIntrState);
7930 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7931
7932 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7933 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7934
7935#ifdef VBOX_STRICT
7936 /* Validate the error-code-valid bit for hardware exceptions. */
7937 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
7938 {
7939 switch (uVector)
7940 {
7941 case X86_XCPT_PF:
7942 case X86_XCPT_DF:
7943 case X86_XCPT_TS:
7944 case X86_XCPT_NP:
7945 case X86_XCPT_SS:
7946 case X86_XCPT_GP:
7947 case X86_XCPT_AC:
7948 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7949 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7950 /* fallthru */
7951 default:
7952 break;
7953 }
7954 }
7955#endif
7956
7957 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7958 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7959 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7960
7961 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7962
7963 /* We require CR0 to check if the guest is in real-mode. */
7964 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7965 AssertRCReturn(rc, rc);
7966
7967 /*
7968 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
7969 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
7970 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
7971 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7972 */
7973 if (CPUMIsGuestInRealModeEx(pMixedCtx))
7974 {
7975 PVM pVM = pVCpu->CTX_SUFF(pVM);
7976 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
7977 {
7978 Assert(PDMVmmDevHeapIsEnabled(pVM));
7979 Assert(pVM->hm.s.vmx.pRealModeTSS);
7980
7981 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
7982 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7983 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7984 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7985 AssertRCReturn(rc, rc);
7986 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
7987
7988 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7989 size_t const cbIdtEntry = sizeof(X86IDTR16);
7990 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7991 {
7992 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7993 if (uVector == X86_XCPT_DF)
7994 return VINF_EM_RESET;
7995
7996 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7997 if (uVector == X86_XCPT_GP)
7998 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
7999
8000 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
8001 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
8002 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrorCodeValid */, 0 /* u32ErrCode */,
8003 fStepping, puIntrState);
8004 }
8005
8006 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
8007 uint16_t uGuestIp = pMixedCtx->ip;
8008 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
8009 {
8010 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8011 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
8012 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
8013 }
8014 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
8015 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
8016
8017 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
8018 X86IDTR16 IdtEntry;
8019 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
8020 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
8021 AssertRCReturn(rc, rc);
8022
8023 /* Construct the stack frame for the interrupt/exception handler. */
8024 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
8025 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
8026 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
8027 AssertRCReturn(rc, rc);
8028
8029 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
8030 if (rc == VINF_SUCCESS)
8031 {
8032 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
8033 pMixedCtx->rip = IdtEntry.offSel;
8034 pMixedCtx->cs.Sel = IdtEntry.uSel;
8035 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
8036 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry; /* Real-mode segment base = selector << 4; relies on cbIdtEntry being 4. */
8037 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
8038 && uVector == X86_XCPT_PF)
8039 pMixedCtx->cr2 = GCPtrFaultAddress;
8040
8041 /* If any other guest-state bits are changed here, make sure to update
8042 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
8043 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
8044 | HM_CHANGED_GUEST_RIP
8045 | HM_CHANGED_GUEST_RFLAGS
8046 | HM_CHANGED_GUEST_RSP);
8047
8048 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
8049 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8050 {
8051 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
8052 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
8053 Log4(("Clearing inhibition due to STI.\n"));
8054 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
8055 }
8056 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
8057 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
8058
8059 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
8060 it, if we are returning to ring-3 before executing guest code. */
8061 pVCpu->hm.s.Event.fPending = false;
8062
8063 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
8064 if (fStepping)
8065 rc = VINF_EM_DBG_STEPPED;
8066 }
8067 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
8068 return rc;
8069 }
8070
8071 /*
8072 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.
8073 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
8074 */
8075 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
8076 }
8077
8078 /* Validate. */
8079 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
8080 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
8081 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
8082
8083 /* Inject. */
8084 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
8085 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
8086 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
8087 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
8088
8089 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
8090 && uVector == X86_XCPT_PF)
8091 pMixedCtx->cr2 = GCPtrFaultAddress;
8092
8093 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
8094 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
8095
8096 AssertRCReturn(rc, rc);
8097 return rc;
8098}
8099
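/*
 * Illustrative sketch (not part of the build): the real-mode IVT dispatch math
 * performed by hmR0VmxInjectEventVmcs() above for real-on-v86 guests.  Each IVT
 * entry is 4 bytes (16-bit offset followed by 16-bit selector), so the entry
 * lives at IDTR.base + vector * 4 and is usable only if vector * 4 + 3 is
 * within the IDT limit; the handler's segment base is then selector * 16.
 * Stand-in names only, not the VBox helpers:
 *
 *   static bool exampleIvtLookup(uint64_t uIdtBase, uint32_t cbIdtLimit, uint8_t uVector,
 *                                uint64_t *pGCPhysEntry)
 *   {
 *       uint32_t const offEntry = (uint32_t)uVector * 4;
 *       if (offEntry + 3 > cbIdtLimit)
 *           return false;                      // no usable entry: #GP, #DF or triple-fault path
 *       *pGCPhysEntry = uIdtBase + offEntry;   // read the 16-bit offset and selector pair from here
 *       return true;
 *   }
 */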
8100
8101/**
8102 * Clears the interrupt-window exiting control in the VMCS and if necessary
8103 * clears the current event in the VMCS as well.
8104 *
8105 * @returns VBox status code.
8106 * @param pVCpu Pointer to the VMCPU.
8107 *
8108 * @remarks Use this function only to clear events that have not yet been
8109 * delivered to the guest but are injected in the VMCS!
8110 * @remarks No-long-jump zone!!!
8111 */
8112static void hmR0VmxClearEventVmcs(PVMCPU pVCpu)
8113{
8114 int rc;
8115 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
8116
8117 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
8118 {
8119 hmR0VmxClearIntWindowExitVmcs(pVCpu);
8120 Assert(!pVCpu->hm.s.Event.fPending);
8121 }
8122
8123 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
8124 {
8125 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
8126 Assert(!pVCpu->hm.s.Event.fPending);
8127 }
8128
8129 if (!pVCpu->hm.s.Event.fPending)
8130 return;
8131
8132#ifdef VBOX_STRICT
8133 uint32_t u32EntryInfo;
8134 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
8135 AssertRC(rc);
8136 Assert(VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo));
8137#endif
8138
8139 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
8140 AssertRC(rc);
8141
8142 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
8143 AssertRC(rc);
8144
8145 /* We deliberately don't clear "hm.s.Event.fPending" here, it's taken
8146 care of in hmR0VmxExitToRing3() converting the pending event to TRPM. */
8147}
8148
8149
8150/**
8151 * Enters the VT-x session.
8152 *
8153 * @returns VBox status code.
8154 * @param pVM Pointer to the VM.
8155 * @param pVCpu Pointer to the VMCPU.
8156 * @param pCpu Pointer to the CPU info struct.
8157 */
8158VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
8159{
8160 AssertPtr(pVM);
8161 AssertPtr(pVCpu);
8162 Assert(pVM->hm.s.vmx.fSupported);
8163 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8164 NOREF(pCpu); NOREF(pVM);
8165
8166 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8167 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8168
8169#ifdef VBOX_STRICT
8170 /* Make sure we're in VMX root mode. */
8171 RTCCUINTREG u32HostCR4 = ASMGetCR4();
8172 if (!(u32HostCR4 & X86_CR4_VMXE))
8173 {
8174 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
8175 return VERR_VMX_X86_CR4_VMXE_CLEARED;
8176 }
8177#endif
8178
8179 /*
8180 * Load the VCPU's VMCS as the current (and active) one.
8181 */
8182 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
8183 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8184 if (RT_FAILURE(rc))
8185 return rc;
8186
8187 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8188 pVCpu->hm.s.fLeaveDone = false;
8189 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8190
8191 return VINF_SUCCESS;
8192}
8193
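/*
 * Illustrative sketch (not part of the build): the per-VCPU VMCS launch-state
 * tracking used by VMXR0Enter() above and the thread-context callback below.
 * VMPTRLD makes a VMCS current and active on the CPU, VMCLEAR puts it back
 * into the clear state so it can safely be loaded on another CPU.  Stand-in
 * enum and function, not the HMVMX_VMCS_STATE_* flags:
 *
 *   typedef enum EXAMPLEVMCSSTATE { EXAMPLE_VMCS_CLEAR = 0, EXAMPLE_VMCS_ACTIVE } EXAMPLEVMCSSTATE;
 *
 *   static int exampleActivateVmcs(EXAMPLEVMCSSTATE *penmState)
 *   {
 *       if (*penmState != EXAMPLE_VMCS_CLEAR)
 *           return -1;                     // must VMCLEAR before (re)loading, esp. on another CPU
 *       // ...VMPTRLD of the VMCS physical address would go here...
 *       *penmState = EXAMPLE_VMCS_ACTIVE;
 *       return 0;
 *   }
 */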
8194
8195/**
8196 * The thread-context callback (only on platforms which support it).
8197 *
8198 * @param enmEvent The thread-context event.
8199 * @param pVCpu Pointer to the VMCPU.
8200 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
8201 * @thread EMT(pVCpu)
8202 */
8203VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8204{
8205 NOREF(fGlobalInit);
8206
8207 switch (enmEvent)
8208 {
8209 case RTTHREADCTXEVENT_PREEMPTING:
8210 {
8211 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8212 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8213 VMCPU_ASSERT_EMT(pVCpu);
8214
8215 PVM pVM = pVCpu->CTX_SUFF(pVM);
8216 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
8217
8218 /* No longjmps (logger flushes, locks) in this fragile context. */
8219 VMMRZCallRing3Disable(pVCpu);
8220 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8221
8222 /*
8223 * Restore host-state (FPU, debug etc.)
8224 */
8225 if (!pVCpu->hm.s.fLeaveDone)
8226 {
8227 /* Do -not- save the guest state here as we might already be in the middle of saving it (esp. bad if we are
8228 holding the PGM lock while saving the guest state, see hmR0VmxSaveGuestControlRegs()). */
8229 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
8230 pVCpu->hm.s.fLeaveDone = true;
8231 }
8232
8233 /* Leave HM context, takes care of local init (term). */
8234 int rc = HMR0LeaveCpu(pVCpu);
8235 AssertRC(rc); NOREF(rc);
8236
8237 /* Restore longjmp state. */
8238 VMMRZCallRing3Enable(pVCpu);
8239 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptPreempting);
8240 break;
8241 }
8242
8243 case RTTHREADCTXEVENT_RESUMED:
8244 {
8245 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8246 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
8247 VMCPU_ASSERT_EMT(pVCpu);
8248
8249 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8250 VMMRZCallRing3Disable(pVCpu);
8251 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8252
8253 /* Initialize the bare minimum state required for HM. This takes care of
8254 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8255 int rc = HMR0EnterCpu(pVCpu);
8256 AssertRC(rc);
8257 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8258
8259 /* Load the active VMCS as the current one. */
8260 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8261 {
8262 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8263 AssertRC(rc); NOREF(rc);
8264 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8265 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8266 }
8267 pVCpu->hm.s.fLeaveDone = false;
8268
8269 /* Restore longjmp state. */
8270 VMMRZCallRing3Enable(pVCpu);
8271 break;
8272 }
8273
8274 default:
8275 break;
8276 }
8277}
8278
8279
8280/**
8281 * Saves the host state in the VMCS host-state.
8282 * Sets up the VM-exit MSR-load area.
8283 *
8284 * The CPU state will be loaded from these fields on every successful VM-exit.
8285 *
8286 * @returns VBox status code.
8287 * @param pVM Pointer to the VM.
8288 * @param pVCpu Pointer to the VMCPU.
8289 *
8290 * @remarks No-long-jump zone!!!
8291 */
8292static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8293{
8294 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8295
8296 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8297 return VINF_SUCCESS;
8298
8299 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8300 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8301
8302 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8303 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8304
8305 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8306 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8307
8308 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8309 return rc;
8310}
8311
8312
8313/**
8314 * Saves the host state in the VMCS host-state.
8315 *
8316 * @returns VBox status code.
8317 * @param pVM Pointer to the VM.
8318 * @param pVCpu Pointer to the VMCPU.
8319 *
8320 * @remarks No-long-jump zone!!!
8321 */
8322VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8323{
8324 AssertPtr(pVM);
8325 AssertPtr(pVCpu);
8326
8327 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8328
8329 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
8330 and have to resave the host state, but most of the time we won't be, so do it here before we disable interrupts. */
8331 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8332 return hmR0VmxSaveHostState(pVM, pVCpu);
8333}
8334
8335
8336/**
8337 * Loads the guest state into the VMCS guest-state area.
8338 *
8339 * This will typically be done before VM-entry when the guest-CPU state and the
8340 * VMCS state may potentially be out of sync.
8341 *
8342 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
8343 * VM-entry controls.
8344 * Sets up the appropriate VMX non-root function to execute guest code based on
8345 * the guest CPU mode.
8346 *
8347 * @returns VBox status code.
8348 * @param pVM Pointer to the VM.
8349 * @param pVCpu Pointer to the VMCPU.
8350 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8351 * out-of-sync. Make sure to update the required fields
8352 * before using them.
8353 *
8354 * @remarks No-long-jump zone!!!
8355 */
8356static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8357{
8358 AssertPtr(pVM);
8359 AssertPtr(pVCpu);
8360 AssertPtr(pMixedCtx);
8361 HMVMX_ASSERT_PREEMPT_SAFE();
8362
8363 VMMRZCallRing3Disable(pVCpu);
8364 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8365
8366 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8367
8368 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8369
8370 /* Determine real-on-v86 mode. */
8371 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8372 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
8373 && CPUMIsGuestInRealModeEx(pMixedCtx))
8374 {
8375 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8376 }
8377
8378 /*
8379 * Load the guest-state into the VMCS.
8380 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8381 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8382 */
8383 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8384 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8385
8386 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8387 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8388 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8389
8390 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8391 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8392 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8393
8394 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8395 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8396
8397 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8398 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8399
8400 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
8401 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8402 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8403
8404 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
8405 determine we don't have to swap EFER after all. */
8406 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8407 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8408
8409 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8410 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8411
8412 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
8413 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8414
8415 /*
8416 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
8417 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
8418 */
8419 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8420 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8421
8422 /* Clear any unused and reserved bits. */
8423 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8424
8425 VMMRZCallRing3Enable(pVCpu);
8426
8427 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8428 return rc;
8429}
8430
8431
8432/**
8433 * Loads the state shared between the host and guest into the VMCS.
8434 *
8435 * @param pVM Pointer to the VM.
8436 * @param pVCpu Pointer to the VMCPU.
8437 * @param pCtx Pointer to the guest-CPU context.
8438 *
8439 * @remarks No-long-jump zone!!!
8440 */
8441static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8442{
8443 NOREF(pVM);
8444
8445 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8446 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8447
8448 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8449 {
8450 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8451 AssertRC(rc);
8452 }
8453
8454 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8455 {
8456 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8457 AssertRC(rc);
8458
8459 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8460 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8461 {
8462 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8463 AssertRC(rc);
8464 }
8465 }
8466
8467 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8468 {
8469#if HC_ARCH_BITS == 64
8470 if (pVM->hm.s.fAllow64BitGuests)
8471 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8472#endif
8473 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8474 }
8475
8476 /* Loading CR0, debug state might have changed intercepts, update VMCS. */
8477 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
8478 {
8479 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8480 AssertRC(rc);
8481 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
8482 }
8483
8484 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8485 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8486}
8487
8488
8489/**
8490 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8491 *
8492 * @param pVM Pointer to the VM.
8493 * @param pVCpu Pointer to the VMCPU.
8494 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8495 * out-of-sync. Make sure to update the required fields
8496 * before using them.
8497 */
8498DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8499{
8500 HMVMX_ASSERT_PREEMPT_SAFE();
8501
8502 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8503#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8504 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8505#endif
8506
8507 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8508 {
8509 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8510 AssertRC(rc);
8511 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8512 }
8513 else if (HMCPU_CF_VALUE(pVCpu))
8514 {
8515 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8516 AssertRC(rc);
8517 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8518 }
8519
8520 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8521 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8522 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8523 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8524}
8525
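/*
 * Illustrative sketch (not part of the build): the dirty-flag driven decision
 * made by hmR0VmxLoadGuestStateOptimal() above.  If only the RIP flag is dirty
 * a minimal reload of that one VMCS field suffices; any other dirty bit forces
 * the full hmR0VmxLoadGuestState() path.  Stand-in bit value, not the real
 * HM_CHANGED_* masks:
 *
 *   static const char *exampleReloadPath(uint32_t fDirty, uint32_t fRipBit)
 *   {
 *       if (fDirty == fRipBit)
 *           return "minimal";    // only RIP changed: write just that field
 *       if (fDirty != 0)
 *           return "full";       // anything else changed: reload the whole guest-state area
 *       return "none";           // nothing to do
 *   }
 */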
8526
8527/**
8528 * Does the preparations before executing guest code in VT-x.
8529 *
8530 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8531 * recompiler/IEM. We must be cautious what we do here regarding committing
8532 * guest-state information into the VMCS, on the assumption that we will
8533 * actually execute the guest in VT-x mode.
8534 *
8535 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8536 * the common-state (TRPM/forceflags), we must undo those changes so that the
8537 * recompiler/IEM can (and should) use them when it resumes guest execution.
8538 * Otherwise such operations must be done when we can no longer exit to ring-3.
8539 *
8540 * @returns Strict VBox status code.
8541 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8542 * have been disabled.
8543 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8544 * double-fault into the guest.
8545 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8546 * dispatched directly.
8547 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8548 *
8549 * @param pVM Pointer to the VM.
8550 * @param pVCpu Pointer to the VMCPU.
8551 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8552 * out-of-sync. Make sure to update the required fields
8553 * before using them.
8554 * @param pVmxTransient Pointer to the VMX transient structure.
8555 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8556 * us ignore some of the reasons for returning to
8557 * ring-3, and return VINF_EM_DBG_STEPPED if event
8558 * dispatching took place.
8559 */
8560static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8561{
8562 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8563
8564#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8565 PGMRZDynMapFlushAutoSet(pVCpu);
8566#endif
8567
8568 /* Check force flag actions that might require us to go back to ring-3. */
8569 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
8570 if (rc != VINF_SUCCESS)
8571 return rc;
8572
8573#ifndef IEM_VERIFICATION_MODE_FULL
8574 /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
8575 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8576 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8577 {
8578 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8579 RTGCPHYS GCPhysApicBase;
8580 GCPhysApicBase = pMixedCtx->msrApicBase;
8581 GCPhysApicBase &= PAGE_BASE_GC_MASK;
8582
8583 /* Unalias any existing mapping. */
8584 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8585 AssertRCReturn(rc, rc);
8586
8587 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
8588 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
8589 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8590 AssertRCReturn(rc, rc);
8591
8592 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8593 }
8594#endif /* !IEM_VERIFICATION_MODE_FULL */
8595
8596 if (TRPMHasTrap(pVCpu))
8597 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8598 else if (!pVCpu->hm.s.Event.fPending)
8599 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8600
8601 /*
8602 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
8603 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
8604 */
8605 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
8606 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8607 {
8608 Assert(rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
8609 return rc;
8610 }
8611
8612 /*
8613 * Load the guest state bits, we can handle longjmps/getting preempted here.
8614 *
8615 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8616 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8617 * Hence, this needs to be done -after- injection of events.
8618 */
8619 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8620
8621 /*
8622 * No longjmps to ring-3 from this point on!!!
8623 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8624 * This also disables flushing of the R0-logger instance (if any).
8625 */
8626 VMMRZCallRing3Disable(pVCpu);
8627
8628 /*
8629 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
8630 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
8631 *
8632 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
8633 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
8634 *
8635 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
8636 * executing guest code.
8637 */
8638 pVmxTransient->uEflags = ASMIntDisableFlags();
8639 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8640 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8641 && ( !fStepping /* Optimized for the non-stepping case, of course. */
8642 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8643 {
8644 hmR0VmxClearEventVmcs(pVCpu);
8645 ASMSetFlags(pVmxTransient->uEflags);
8646 VMMRZCallRing3Enable(pVCpu);
8647 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8648 return VINF_EM_RAW_TO_R3;
8649 }
8650
8651 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
8652 {
8653 hmR0VmxClearEventVmcs(pVCpu);
8654 ASMSetFlags(pVmxTransient->uEflags);
8655 VMMRZCallRing3Enable(pVCpu);
8656 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8657 return VINF_EM_RAW_INTERRUPT;
8658 }
8659
8660 /* We've injected any pending events. This is really the point of no return (to ring-3). */
8661 pVCpu->hm.s.Event.fPending = false;
8662
8663 return VINF_SUCCESS;
8664}
8665
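/*
 * Illustrative sketch (not part of the build): the commit protocol implemented
 * by hmR0VmxPreRunGuest() above.  After events are injected and the guest state
 * is loaded, longjmps and host interrupts are disabled and the force-flags are
 * re-checked one last time; only if nothing is pending do we commit to the
 * VM-entry.  Stand-in booleans, not the real force-flag or preemption checks:
 *
 *   static int exampleFinalCheck(bool fForceFlagsPending, bool fHostPreemptPending)
 *   {
 *       // interrupts are already disabled here; no more longjmps to ring-3 allowed
 *       if (fForceFlagsPending)
 *           return VINF_EM_RAW_TO_R3;        // undo event injection, re-enable, go to ring-3
 *       if (fHostPreemptPending)
 *           return VINF_EM_RAW_INTERRUPT;    // likewise, let the host service its interrupt first
 *       return VINF_SUCCESS;                 // point of no return: next stop is VM-entry
 *   }
 */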
8666
8667/**
8668 * Prepares to run guest code in VT-x and we've committed to doing so. This
8669 * means there is no backing out to ring-3 or anywhere else at this
8670 * point.
8671 *
8672 * @param pVM Pointer to the VM.
8673 * @param pVCpu Pointer to the VMCPU.
8674 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8675 * out-of-sync. Make sure to update the required fields
8676 * before using them.
8677 * @param pVmxTransient Pointer to the VMX transient structure.
8678 *
8679 * @remarks Called with preemption disabled.
8680 * @remarks No-long-jump zone!!!
8681 */
8682static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8683{
8684 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8685 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8686 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8687
8688 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8689 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8690
8691#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8692 if (!CPUMIsGuestFPUStateActive(pVCpu))
8693 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8694 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8695#endif
8696
8697 if ( pVCpu->hm.s.fPreloadGuestFpu
8698 && !CPUMIsGuestFPUStateActive(pVCpu))
8699 {
8700 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8701 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8702 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8703 }
8704
8705 /*
8706 * Lazy-update of the host MSRs values in the auto-load/store MSR area.
8707 */
8708 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8709 && pVCpu->hm.s.vmx.cMsrs > 0)
8710 {
8711 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8712 }
8713
8714 /*
8715 * Load the host state bits as we may've been preempted (only happens when
8716 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
8717 */
8718 /** @todo Why should hmR0VmxSetupVMRunHandler() changing pfnStartVM have
8719  *        any effect on the host state needing to be saved? */
8720 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8721 {
8722 /* This ASSUMES that pfnStartVM has been set up already. */
8723 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8724 AssertRC(rc);
8725 STAM_COUNTER_INC(&pVCpu->hm.s.StatPreemptSaveHostState);
8726 }
8727 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8728
8729 /*
8730 * Load the state shared between host and guest (FPU, debug, lazy MSRs).
8731 */
8732 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8733 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8734 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8735
8736 /* Store status of the shared guest-host state at the time of VM-entry. */
8737#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8738 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8739 {
8740 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8741 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8742 }
8743 else
8744#endif
8745 {
8746 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8747 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8748 }
8749 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8750
8751 /*
8752 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8753 */
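    /* Offset 0x80 into the virtual-APIC page holds the TPR (VTPR); see Intel spec. 29.1.1 "Virtualized APIC Registers". */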
8754 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8755 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
8756
8757 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
8758 RTCPUID idCurrentCpu = pCpu->idCpu;
8759 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8760 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8761 {
8762 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
8763 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8764 }
8765
8766 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
8767 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8768 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8769 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8770
8771 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8772
8773 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8774 to start executing. */
8775
8776 /*
8777 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8778 */
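    /* RDTSCP returns the value of IA32_TSC_AUX, so when we let the guest execute it without a VM-exit the guest's
       TSC_AUX must be loaded via the auto-load/store MSR area; when RDTSC(P) causes VM-exits we emulate it instead
       and can drop the MSR swap. */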
8779 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8780 {
8781 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8782 {
8783 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8784 AssertRC(rc2);
8785 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8786 bool fMsrUpdated = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu),
8787 true /* fUpdateHostMsr */);
8788 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8789 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8790 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8791 }
8792 else
8793 {
8794 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8795 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8796 }
8797 }
8798
8799#ifdef VBOX_STRICT
8800 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8801 hmR0VmxCheckHostEferMsr(pVCpu);
8802 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8803#endif
8804#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8805 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8806 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8807 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8808#endif
8809}
8810
8811
8812/**
8813 * Performs some essential restoration of state after running guest code in
8814 * VT-x.
8815 *
8816 * @param pVM Pointer to the VM.
8817 * @param pVCpu Pointer to the VMCPU.
8818 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
8819 * out-of-sync. Make sure to update the required fields
8820 * before using them.
8821 * @param pVmxTransient Pointer to the VMX transient structure.
8822 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8823 *
8824 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8825 *
8826 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8827 * unconditionally when it is safe to do so.
8828 */
8829static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8830{
8831 NOREF(pVM);
8832
8833 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8834
8835 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
8836 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
8837 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8838 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8839 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8840 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8841
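    /* Since RDTSC wasn't intercepted, the guest may have read the host TSC plus our offset; record it as the last
       TSC value seen by the guest so TM can keep the guest TSC monotonic. */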
8842 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8843 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);
8844
8845 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8846 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8847 Assert(!(ASMGetFlags() & X86_EFL_IF));
8848 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8849
8850#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8851 if (CPUMIsGuestFPUStateActive(pVCpu))
8852 {
8853 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8854 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8855 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8856 }
8857#endif
8858
8859#if HC_ARCH_BITS == 64
8860 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8861#endif
8862 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8863#ifdef VBOX_STRICT
8864 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8865#endif
8866 ASMSetFlags(pVmxTransient->uEflags); /* Enable interrupts. */
8867 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8868
8869 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8870 uint32_t uExitReason;
8871 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8872 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8873 AssertRC(rc);
8874 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
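    /* The CPU clears the valid bit of the VM-entry interruption-information field on a successful VM-entry, so a
       valid bit that is still set here means the event injection (and thus the VM-entry) failed. */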
8875 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
8876
8877 /* Update the VM-exit history array. */
8878 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmxTransient->uExitReason);
8879
8880 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
8881 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
8882 {
8883 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8884 pVmxTransient->fVMEntryFailed));
8885 return;
8886 }
8887
8888 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
8889 {
8890 /** @todo We can optimize this by only syncing with our force-flags when
8891 * really needed and keeping the VMCS state as it is for most
8892 * VM-exits. */
8893 /* Update the guest interruptibility-state from the VMCS. */
8894 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8895
8896#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8897 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8898 AssertRC(rc);
8899#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8900 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8901 AssertRC(rc);
8902#endif
8903
8904 /*
8905 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
8906      * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
8907 * why it's done here as it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
8908 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
8909 */
8910 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8911 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8912 {
8913 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8914 AssertRC(rc);
8915 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8916 }
8917 }
8918}
8919
8920
8921/**
8922 * Runs the guest code using VT-x the normal way.
8923 *
8924 * @returns VBox status code.
8925 * @param pVM Pointer to the VM.
8926 * @param pVCpu Pointer to the VMCPU.
8927 * @param pCtx Pointer to the guest-CPU context.
8928 *
8929 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8930 */
8931static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8932{
8933 VMXTRANSIENT VmxTransient;
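    /* Force a TSC-offsetting and preemption-timer update on the first VM-entry; afterwards it is only redone when
       flagged by the VM-exit handlers or when we migrate to another host CPU. */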
8934 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8935 int rc = VERR_INTERNAL_ERROR_5;
8936 uint32_t cLoops = 0;
8937
8938 for (;; cLoops++)
8939 {
8940 Assert(!HMR0SuspendPending());
8941 HMVMX_ASSERT_CPU_SAFE();
8942
8943         /* Preparatory work for running guest code; this may force us to return
8944 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8945 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8946 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8947 if (rc != VINF_SUCCESS)
8948 break;
8949
8950 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8951 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8952 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8953
8954 /* Restore any residual host-state and save any bits shared between host
8955 and guest into the guest-CPU state. Re-enables interrupts! */
8956 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8957
8958 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8959 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8960 {
8961 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8962 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8963 return rc;
8964 }
8965
8966 /* Profile the VM-exit. */
8967 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8968 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8969 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8970 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8971 HMVMX_START_EXIT_DISPATCH_PROF();
8972
8973 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8974 if (RT_UNLIKELY(VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()))
8975 {
8976 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
8977 hmR0VmxSaveGuestState(pVCpu, pCtx);
8978 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
8979 }
8980
8981 /* Handle the VM-exit. */
8982#ifdef HMVMX_USE_FUNCTION_TABLE
8983 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8984#else
8985 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8986#endif
8987 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8988 if (rc != VINF_SUCCESS)
8989 break;
8990 if (cLoops > pVM->hm.s.cMaxResumeLoops)
8991 {
8992 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
8993 rc = VINF_EM_RAW_INTERRUPT;
8994 break;
8995 }
8996 }
8997
8998 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8999 return rc;
9000}
9001
9002
9003/**
9004 * Single steps guest code using VT-x.
9005 *
9006 * @returns VBox status code.
9007 * @param pVM Pointer to the VM.
9008 * @param pVCpu Pointer to the VMCPU.
9009 * @param pCtx Pointer to the guest-CPU context.
9010 *
9011 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
9012 */
9013static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9014{
9015 VMXTRANSIENT VmxTransient;
9016 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
9017 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
9018 uint32_t cLoops = 0;
9019 uint16_t uCsStart = pCtx->cs.Sel;
9020 uint64_t uRipStart = pCtx->rip;
9021
9022 for (;; cLoops++)
9023 {
9024 Assert(!HMR0SuspendPending());
9025 HMVMX_ASSERT_CPU_SAFE();
9026
9027         /* Preparatory work for running guest code; this may force us to return
9028 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
9029 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
9030 rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /* fStepping */);
9031 if (rcStrict != VINF_SUCCESS)
9032 break;
9033
9034 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
9035 rcStrict = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
9036 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
9037
9038 /* Restore any residual host-state and save any bits shared between host
9039 and guest into the guest-CPU state. Re-enables interrupts! */
9040 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
9041
9042 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
9043 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
9044 {
9045 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
9046 hmR0VmxReportWorldSwitchError(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pCtx, &VmxTransient);
9047 return VBOXSTRICTRC_TODO(rcStrict);
9048 }
9049
9050 /* Profile the VM-exit. */
9051 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
9052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
9053 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
9054 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
9055 HMVMX_START_EXIT_DISPATCH_PROF();
9056
9057 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
9058 if (RT_UNLIKELY(VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()))
9059 {
9060 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
9061 hmR0VmxSaveGuestState(pVCpu, pCtx);
9062 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
9063 }
9064
9065 /* Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitStep(). */
9066 rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
9067 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
9068 if (rcStrict != VINF_SUCCESS)
9069 break;
9070 if (cLoops > pVM->hm.s.cMaxResumeLoops)
9071 {
9072 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9073 rcStrict = VINF_EM_RAW_INTERRUPT;
9074 break;
9075 }
9076
9077 /*
9078          * Did the RIP change? If so, consider it a single step.
9079 * Otherwise, make sure one of the TFs gets set.
9080 */
9081 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
9082 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
9083 AssertRCReturn(rc2, rc2);
9084 if ( pCtx->rip != uRipStart
9085 || pCtx->cs.Sel != uCsStart)
9086 {
9087 rcStrict = VINF_EM_DBG_STEPPED;
9088 break;
9089 }
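        /* The RIP hasn't moved; mark the debug state dirty so the single-stepping setup (TF/MTF) is reloaded before
           re-entering the guest. */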
9090 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
9091 }
9092
9093 /*
9094 * Clear the X86_EFL_TF if necessary.
9095 */
9096 if (pVCpu->hm.s.fClearTrapFlag)
9097 {
9098 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
9099 AssertRCReturn(rc2, rc2);
9100 pVCpu->hm.s.fClearTrapFlag = false;
9101 pCtx->eflags.Bits.u1TF = 0;
9102 }
9103     /** @todo there seem to be issues with the resume flag when the monitor trap
9104 * flag is pending without being used. Seen early in bios init when
9105 * accessing APIC page in protected mode. */
9106
9107 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
9108 return VBOXSTRICTRC_TODO(rcStrict);
9109}
9110
9111
9112/**
9113 * Runs the guest code using VT-x.
9114 *
9115 * @returns VBox status code.
9116 * @param pVM Pointer to the VM.
9117 * @param pVCpu Pointer to the VMCPU.
9118 * @param pCtx Pointer to the guest-CPU context.
9119 */
9120VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9121{
9122 Assert(VMMRZCallRing3IsEnabled(pVCpu));
9123 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
9124 HMVMX_ASSERT_PREEMPT_SAFE();
9125
9126 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
9127
9128 int rc;
9129 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
9130 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
9131 else
9132 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
9133
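    /* Convert VT-x/HM specific status codes into ones the generic EM loop in ring-3 understands. */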
9134 if (rc == VERR_EM_INTERPRETER)
9135 rc = VINF_EM_RAW_EMULATE_INSTR;
9136 else if (rc == VINF_EM_RESET)
9137 rc = VINF_EM_TRIPLE_FAULT;
9138
9139 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
9140 if (RT_FAILURE(rc2))
9141 {
9142 pVCpu->hm.s.u32HMError = rc;
9143 rc = rc2;
9144 }
9145 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
9146 return rc;
9147}
9148
9149
9150#ifndef HMVMX_USE_FUNCTION_TABLE
9151DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
9152{
9153#ifdef DEBUG_ramshankar
9154# define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
9155# define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
9156#endif
9157 int rc;
9158 switch (rcReason)
9159 {
9160 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9161 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9162 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9163 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9164 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9165 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9166 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9167 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9168 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9169 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9170 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9171 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9172 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9173 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9174 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9175 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9176 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9177 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9178 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9179 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9180 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9181 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9182 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9183 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9184 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9185 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9186 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9187 case VMX_EXIT_WBINVD: /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9188 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9189 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9190 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9191 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9192 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9193 case VMX_EXIT_VMCALL: /* SVVMCS(); */ rc = hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9194
9195 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
9196 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
9197 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
9198 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
9199 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9200 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9201 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
9202 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
9203 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
9204
9205 case VMX_EXIT_VMCLEAR:
9206 case VMX_EXIT_VMLAUNCH:
9207 case VMX_EXIT_VMPTRLD:
9208 case VMX_EXIT_VMPTRST:
9209 case VMX_EXIT_VMREAD:
9210 case VMX_EXIT_VMRESUME:
9211 case VMX_EXIT_VMWRITE:
9212 case VMX_EXIT_VMXOFF:
9213 case VMX_EXIT_VMXON:
9214 case VMX_EXIT_INVEPT:
9215 case VMX_EXIT_INVVPID:
9216 case VMX_EXIT_VMFUNC:
9217 case VMX_EXIT_XSAVES:
9218 case VMX_EXIT_XRSTORS:
9219 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
9220 break;
9221 case VMX_EXIT_RESERVED_60:
9222 case VMX_EXIT_RDSEED: /* only spurious exits, so undefined */
9223 case VMX_EXIT_RESERVED_62:
9224 default:
9225 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
9226 break;
9227 }
9228 return rc;
9229}
9230#endif /* !HMVMX_USE_FUNCTION_TABLE */
9231
9232
9233/**
9234 * Single-stepping VM-exit filtering.
9235 *
9236 * This is preprocessing the exits and deciding whether we've gotten far enough
9237 * to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit handling is
9238 * performed.
9239 *
9240 * @returns Strict VBox status code.
9241 * @param pVCpu The virtual CPU of the calling EMT.
9242 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9243 * out-of-sync. Make sure to update the required
9244 * fields before using them.
9245 * @param pVmxTransient Pointer to the VMX-transient structure.
9246 * @param uExitReason The VM-exit reason.
9247 */
9248DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9249 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart)
9250{
9251 switch (uExitReason)
9252 {
9253 case VMX_EXIT_XCPT_OR_NMI:
9254 {
9255 /* Check for host NMI. */
9256 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9257 AssertRCReturn(rc2, rc2);
9258 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9259 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9260 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9261 /* fall thru */
9262 }
9263
9264 case VMX_EXIT_EPT_MISCONFIG:
9265 case VMX_EXIT_TRIPLE_FAULT:
9266 case VMX_EXIT_APIC_ACCESS:
9267 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9268 case VMX_EXIT_TASK_SWITCH:
9269
9270 /* Instruction specific VM-exits: */
9271 case VMX_EXIT_IO_INSTR:
9272 case VMX_EXIT_CPUID:
9273 case VMX_EXIT_RDTSC:
9274 case VMX_EXIT_RDTSCP:
9275 case VMX_EXIT_MOV_CRX:
9276 case VMX_EXIT_MWAIT:
9277 case VMX_EXIT_MONITOR:
9278 case VMX_EXIT_RDMSR:
9279 case VMX_EXIT_WRMSR:
9280 case VMX_EXIT_MOV_DRX:
9281 case VMX_EXIT_HLT:
9282 case VMX_EXIT_INVD:
9283 case VMX_EXIT_INVLPG:
9284 case VMX_EXIT_RSM:
9285 case VMX_EXIT_PAUSE:
9286 case VMX_EXIT_XDTR_ACCESS:
9287 case VMX_EXIT_TR_ACCESS:
9288 case VMX_EXIT_WBINVD:
9289 case VMX_EXIT_XSETBV:
9290 case VMX_EXIT_RDRAND:
9291 case VMX_EXIT_INVPCID:
9292 case VMX_EXIT_GETSEC:
9293 case VMX_EXIT_RDPMC:
9294 case VMX_EXIT_VMCALL:
9295 case VMX_EXIT_VMCLEAR:
9296 case VMX_EXIT_VMLAUNCH:
9297 case VMX_EXIT_VMPTRLD:
9298 case VMX_EXIT_VMPTRST:
9299 case VMX_EXIT_VMREAD:
9300 case VMX_EXIT_VMRESUME:
9301 case VMX_EXIT_VMWRITE:
9302 case VMX_EXIT_VMXOFF:
9303 case VMX_EXIT_VMXON:
9304 case VMX_EXIT_INVEPT:
9305 case VMX_EXIT_INVVPID:
9306 case VMX_EXIT_VMFUNC:
9307 {
9308 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9309 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9310 AssertRCReturn(rc2, rc2);
9311 if ( pMixedCtx->rip != uRipStart
9312 || pMixedCtx->cs.Sel != uCsStart)
9313 return VINF_EM_DBG_STEPPED;
9314 break;
9315 }
9316 }
9317
9318 /*
9319 * Normal processing.
9320 */
9321#ifdef HMVMX_USE_FUNCTION_TABLE
9322 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9323#else
9324 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9325#endif
9326}
9327
9328
9329#ifdef DEBUG
9330 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
9331# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
9332 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
9333
9334# define HMVMX_ASSERT_PREEMPT_CPUID() \
9335 do \
9336 { \
9337 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
9338 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
9339 } while (0)
9340
9341# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9342 do { \
9343 AssertPtr(pVCpu); \
9344 AssertPtr(pMixedCtx); \
9345 AssertPtr(pVmxTransient); \
9346 Assert(pVmxTransient->fVMEntryFailed == false); \
9347 Assert(ASMIntAreEnabled()); \
9348 HMVMX_ASSERT_PREEMPT_SAFE(); \
9349 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
9350 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
9351 HMVMX_ASSERT_PREEMPT_SAFE(); \
9352 if (VMMR0IsLogFlushDisabled(pVCpu)) \
9353 HMVMX_ASSERT_PREEMPT_CPUID(); \
9354 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9355 } while (0)
9356
9357# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
9358 do { \
9359 Log4Func(("\n")); \
9360 } while (0)
9361#else /* Release builds */
9362# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9363 do { \
9364 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9365 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
9366 } while (0)
9367# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
9368#endif
9369
9370
9371/**
9372 * Advances the guest RIP after reading it from the VMCS.
9373 *
9374 * @returns VBox status code.
9375 * @param pVCpu Pointer to the VMCPU.
9376 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
9377 * out-of-sync. Make sure to update the required fields
9378 * before using them.
9379 * @param pVmxTransient Pointer to the VMX transient structure.
9380 *
9381 * @remarks No-long-jump zone!!!
9382 */
9383DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9384{
9385 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9386 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9387 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9388 AssertRCReturn(rc, rc);
9389
9390 pMixedCtx->rip += pVmxTransient->cbInstr;
9391 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
9392
9393 /*
9394 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
9395 * pending debug exception field as it takes care of priority of events.
9396 *
9397 * See Intel spec. 32.2.1 "Debug Exceptions".
9398 */
9399 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
9400
9401 return rc;
9402}
9403
9404
9405/**
9406 * Tries to determine what part of the guest-state VT-x has deemed invalid
9407 * and update error record fields accordingly.
9408 *
9409 * @return VMX_IGS_* return codes.
9410 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9411 * wrong with the guest state.
9412 *
9413 * @param pVM Pointer to the VM.
9414 * @param pVCpu Pointer to the VMCPU.
9415 * @param pCtx Pointer to the guest-CPU state.
9416 *
9417 * @remarks This function assumes our cache of the VMCS controls
9418 *          is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9419 */
9420static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9421{
9422#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9423#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9424 uError = (err); \
9425 break; \
9426 } else do { } while (0)
9427
9428 int rc;
9429 uint32_t uError = VMX_IGS_ERROR;
9430 uint32_t u32Val;
9431 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9432
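    /* The checks below run in a do-while(0) block so the HMVMX_ERROR_BREAK/HMVMX_CHECK_BREAK macros can bail out at
       the first failing check. */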
9433 do
9434 {
9435 /*
9436 * CR0.
9437 */
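        /* Bits set in both fixed0 and fixed1 MSRs must be 1 in CR0, bits clear in both must be 0. Thus uSetCR0 is
           the must-be-one mask and uZapCR0 the may-be-one mask. */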
9438 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9439 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9440 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
9441 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9442 if (fUnrestrictedGuest)
9443 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
9444
9445 uint32_t u32GuestCR0;
9446 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
9447 AssertRCBreak(rc);
9448 HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
9449 HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
9450 if ( !fUnrestrictedGuest
9451 && (u32GuestCR0 & X86_CR0_PG)
9452 && !(u32GuestCR0 & X86_CR0_PE))
9453 {
9454 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9455 }
9456
9457 /*
9458 * CR4.
9459 */
9460 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9461 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9462
9463 uint32_t u32GuestCR4;
9464 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
9465 AssertRCBreak(rc);
9466 HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
9467 HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
9468
9469 /*
9470 * IA32_DEBUGCTL MSR.
9471 */
9472 uint64_t u64Val;
9473 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9474 AssertRCBreak(rc);
9475 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9476 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9477 {
9478 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9479 }
9480 uint64_t u64DebugCtlMsr = u64Val;
9481
9482#ifdef VBOX_STRICT
9483 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9484 AssertRCBreak(rc);
9485 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
9486#endif
9487 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
9488
9489 /*
9490 * RIP and RFLAGS.
9491 */
9492 uint32_t u32Eflags;
9493#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9494 if (HMVMX_IS_64BIT_HOST_MODE())
9495 {
9496 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9497 AssertRCBreak(rc);
9498 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
9499 if ( !fLongModeGuest
9500 || !pCtx->cs.Attr.n.u1Long)
9501 {
9502 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9503 }
9504 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9505 * must be identical if the "IA-32e mode guest" VM-entry
9506 * control is 1 and CS.L is 1. No check applies if the
9507 * CPU supports 64 linear-address bits. */
9508
9509 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9510 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9511 AssertRCBreak(rc);
9512 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9513 VMX_IGS_RFLAGS_RESERVED);
9514 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9515 u32Eflags = u64Val;
9516 }
9517 else
9518#endif
9519 {
9520 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9521 AssertRCBreak(rc);
9522 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
9523 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9524 }
9525
9526 if ( fLongModeGuest
9527 || ( fUnrestrictedGuest
9528 && !(u32GuestCR0 & X86_CR0_PE)))
9529 {
9530 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9531 }
9532
9533 uint32_t u32EntryInfo;
9534 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9535 AssertRCBreak(rc);
9536 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9537 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9538 {
9539 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9540 }
9541
9542 /*
9543 * 64-bit checks.
9544 */
9545#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9546 if (HMVMX_IS_64BIT_HOST_MODE())
9547 {
9548 if (fLongModeGuest)
9549 {
9550 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9551 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9552 }
9553
9554 if ( !fLongModeGuest
9555 && (u32GuestCR4 & X86_CR4_PCIDE))
9556 {
9557 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9558 }
9559
9560 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9561 * 51:32 beyond the processor's physical-address width are 0. */
9562
9563 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9564 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9565 {
9566 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9567 }
9568
9569 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9570 AssertRCBreak(rc);
9571 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9572
9573 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9574 AssertRCBreak(rc);
9575 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9576 }
9577#endif
9578
9579 /*
9580 * PERF_GLOBAL MSR.
9581 */
9582 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
9583 {
9584 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9585 AssertRCBreak(rc);
9586 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9587 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9588 }
9589
9590 /*
9591 * PAT MSR.
9592 */
9593 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
9594 {
9595 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9596 AssertRCBreak(rc);
9597 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
9598 for (unsigned i = 0; i < 8; i++)
9599 {
9600 uint8_t u8Val = (u64Val & 0xff);
9601 if ( u8Val != 0 /* UC */
9602 && u8Val != 1 /* WC */
9603 && u8Val != 4 /* WT */
9604 && u8Val != 5 /* WP */
9605 && u8Val != 6 /* WB */
9606 && u8Val != 7 /* UC- */)
9607 {
9608 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9609 }
9610 u64Val >>= 8;
9611 }
9612 }
9613
9614 /*
9615 * EFER MSR.
9616 */
9617 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
9618 {
9619 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9620 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9621 AssertRCBreak(rc);
9622 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9623 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9624 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
9625 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9626 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9627 || !(u32GuestCR0 & X86_CR0_PG)
9628 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
9629 VMX_IGS_EFER_LMA_LME_MISMATCH);
9630 }
9631
9632 /*
9633 * Segment registers.
9634 */
9635 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9636 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9637 if (!(u32Eflags & X86_EFL_VM))
9638 {
9639 /* CS */
9640 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9641 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9642 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9643 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9644 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9645 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9646 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9647 /* CS cannot be loaded with NULL in protected mode. */
9648 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9649 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9650 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9651 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9652 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9653 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9654 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9655 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9656 else
9657 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9658
9659 /* SS */
9660 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9661 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9662 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9663 if ( !(pCtx->cr0 & X86_CR0_PE)
9664 || pCtx->cs.Attr.n.u4Type == 3)
9665 {
9666 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9667 }
9668 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9669 {
9670 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9671 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9672 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9673 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9674 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9675 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9676 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9677 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9678 }
9679
9680 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
9681 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9682 {
9683 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9684 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9685 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9686 || pCtx->ds.Attr.n.u4Type > 11
9687 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9688 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9689 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9690 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9691 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9692 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9693 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9694 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9695 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9696 }
9697 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9698 {
9699 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9700 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9701 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9702 || pCtx->es.Attr.n.u4Type > 11
9703 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9704 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9705 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9706 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9707 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9708 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9709 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9710 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9711 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9712 }
9713 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9714 {
9715 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9716 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9717 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9718 || pCtx->fs.Attr.n.u4Type > 11
9719 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9720 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9721 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9722 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9723 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9724 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9725 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9726 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9727 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9728 }
9729 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9730 {
9731 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9732 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9733 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9734 || pCtx->gs.Attr.n.u4Type > 11
9735 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9736 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9737 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9738 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9739 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9740 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9741 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9742 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9743 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9744 }
9745 /* 64-bit capable CPUs. */
9746#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9747 if (HMVMX_IS_64BIT_HOST_MODE())
9748 {
9749 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9750 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9751 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9752 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9753 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9754 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9755 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9756 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9757 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9758 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9759 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9760 }
9761#endif
9762 }
9763 else
9764 {
9765 /* V86 mode checks. */
9766 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
9767 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9768 {
9769 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9770 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9771 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9772 }
9773 else
9774 {
9775 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9776 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9777 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9778 }
9779
9780 /* CS */
9781 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9782 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9783 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9784 /* SS */
9785 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9786 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9787 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9788 /* DS */
9789 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9790 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9791 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9792 /* ES */
9793 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9794 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9795 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9796 /* FS */
9797 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9798 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9799 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9800 /* GS */
9801 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9802 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9803 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9804 /* 64-bit capable CPUs. */
9805#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9806 if (HMVMX_IS_64BIT_HOST_MODE())
9807 {
9808 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9809 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9810 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9811 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9812 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9813 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9814 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9815 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9816 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9817 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9818 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9819 }
9820#endif
9821 }
9822
9823 /*
9824 * TR.
9825 */
9826 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9827 /* 64-bit capable CPUs. */
9828#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9829 if (HMVMX_IS_64BIT_HOST_MODE())
9830 {
9831 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9832 }
9833#endif
9834 if (fLongModeGuest)
9835 {
9836 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9837 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9838 }
9839 else
9840 {
9841 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9842 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9843 VMX_IGS_TR_ATTR_TYPE_INVALID);
9844 }
9845 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9846 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9847 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9848 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9849 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9850 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9851 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9852 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9853
9854 /*
9855 * GDTR and IDTR.
9856 */
9857#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9858 if (HMVMX_IS_64BIT_HOST_MODE())
9859 {
9860 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9861 AssertRCBreak(rc);
9862 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9863
9864 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9865 AssertRCBreak(rc);
9866 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9867 }
9868#endif
9869
9870 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9871 AssertRCBreak(rc);
9872 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9873
9874 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9875 AssertRCBreak(rc);
9876 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9877
9878 /*
9879 * Guest Non-Register State.
9880 */
9881 /* Activity State. */
9882 uint32_t u32ActivityState;
9883 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9884 AssertRCBreak(rc);
9885 HMVMX_CHECK_BREAK( !u32ActivityState
9886 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
9887 VMX_IGS_ACTIVITY_STATE_INVALID);
9888 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9889 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9890 uint32_t u32IntrState;
9891 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
9892 AssertRCBreak(rc);
9893 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
9894 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9895 {
9896 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9897 }
9898
9899 /** @todo Activity state and injecting interrupts. Left as a todo since we
9900          *        currently don't use any activity state other than ACTIVE. */
9901
9902 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9903 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9904
9905 /* Guest interruptibility-state. */
9906 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9907 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9908 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
9909 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9910 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9911 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9912 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9913 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9914 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9915 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
9916 {
9917 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9918 {
9919 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9920 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9921 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9922 }
9923 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9924 {
9925 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9926 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9927 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9928 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9929 }
9930 }
9931 /** @todo Assumes the processor is not in SMM. */
9932 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9933 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9934 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9935 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9936 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9937 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
9938 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9939 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9940 {
9941 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
9942 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9943 }
9944
9945 /* Pending debug exceptions. */
9946 if (HMVMX_IS_64BIT_HOST_MODE())
9947 {
9948 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
9949 AssertRCBreak(rc);
9950 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9951 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9952 u32Val = u64Val; /* For pending debug exceptions checks below. */
9953 }
9954 else
9955 {
9956 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
9957 AssertRCBreak(rc);
9958 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9959             HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED); /* Check the value just read into u32Val, not the stale u64Val. */
9960 }
9961
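        /* When interrupts are inhibited by STI/MOV SS or the guest is in the HLT activity state, the pending debug
           exception BS bit must mirror the single-stepping condition, i.e. EFL.TF set and IA32_DEBUGCTL.BTF clear. */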
9962 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9963 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
9964 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9965 {
9966 if ( (u32Eflags & X86_EFL_TF)
9967 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9968 {
9969 /* Bit 14 is PendingDebug.BS. */
9970 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9971 }
9972 if ( !(u32Eflags & X86_EFL_TF)
9973 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9974 {
9975 /* Bit 14 is PendingDebug.BS. */
9976 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9977 }
9978 }
9979
9980 /* VMCS link pointer. */
9981 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
9982 AssertRCBreak(rc);
9983 if (u64Val != UINT64_C(0xffffffffffffffff))
9984 {
9985 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
9986 /** @todo Bits beyond the processor's physical-address width MBZ. */
9987 /** @todo 32-bit located in memory referenced by value of this field (as a
9988 * physical address) must contain the processor's VMCS revision ID. */
9989 /** @todo SMM checks. */
9990 }
9991
9992 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
9993 * not using Nested Paging? */
9994 if ( pVM->hm.s.fNestedPaging
9995 && !fLongModeGuest
9996 && CPUMIsGuestInPAEModeEx(pCtx))
9997 {
9998 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
9999 AssertRCBreak(rc);
10000 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10001
10002 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
10003 AssertRCBreak(rc);
10004 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10005
10006 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
10007 AssertRCBreak(rc);
10008 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10009
10010 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
10011 AssertRCBreak(rc);
10012 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10013 }
10014
10015 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
10016 if (uError == VMX_IGS_ERROR)
10017 uError = VMX_IGS_REASON_NOT_FOUND;
10018 } while (0);
10019
10020 pVCpu->hm.s.u32HMError = uError;
10021 return uError;
10022
10023#undef HMVMX_ERROR_BREAK
10024#undef HMVMX_CHECK_BREAK
10025}
10026
10027/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10028/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
10029/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10030
10031/** @name VM-exit handlers.
10032 * @{
10033 */
10034
10035/**
10036 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
10037 */
10038HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10039{
10040 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10041 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
10042 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
10043 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
10044 return VINF_SUCCESS;
10045 return VINF_EM_RAW_INTERRUPT;
10046}
10047
10048
10049/**
10050 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10051 */
10052HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10053{
10054 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10055 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
10056
10057 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
10058 AssertRCReturn(rc, rc);
10059
10060 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
10061 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
10062 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
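/* We don't use the "acknowledge interrupt on exit" feature, so external interrupts are handled by hmR0VmxExitExtInt() and should never show up here. */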
10063 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
10064
10065 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10066 {
10067 /*
10068 * This cannot be a guest NMI: the only way for the guest to receive an NMI is if we injected it ourselves, and
10069 * anything we inject does not cause a VM-exit directly for the event being injected.
10070 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
10071 *
10072 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
10073 */
10074 VMXDispatchHostNmi();
10075 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
10076 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10077 return VINF_SUCCESS;
10078 }
10079
10080 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
10081 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
10082 if (RT_UNLIKELY(rc != VINF_SUCCESS))
10083 {
10084 if (rc == VINF_HM_DOUBLE_FAULT)
10085 rc = VINF_SUCCESS;
10086 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10087 return rc;
10088 }
10089
10090 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
10091 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
10092 switch (uIntType)
10093 {
10094 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
10095 Assert(uVector == X86_XCPT_DB);
10096 /* no break */
10097 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
10098 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
10099 /* no break */
10100 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
10101 {
10102 switch (uVector)
10103 {
10104 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
10105 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
10106 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
10107 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
10108 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
10109 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
10110#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
10111 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
10112 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10113 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
10114 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10115 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
10116 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10117 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
10118 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10119 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
10120 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10121 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
10122 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10123#endif
10124 default:
10125 {
10126 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10127 AssertRCReturn(rc, rc);
10128
10129 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
10130 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10131 {
10132 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
10133 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
10134 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
10135
10136 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10137 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
10138 AssertRCReturn(rc, rc);
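/* Reflect the exception back to the real-on-v86 guest as a pending event; it will be injected on the next VM-entry. */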
10139 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
10140 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
10141 0 /* GCPtrFaultAddress */);
10142 AssertRCReturn(rc, rc);
10143 }
10144 else
10145 {
10146 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
10147 pVCpu->hm.s.u32HMError = uVector;
10148 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
10149 }
10150 break;
10151 }
10152 }
10153 break;
10154 }
10155
10156 default:
10157 {
10158 pVCpu->hm.s.u32HMError = uExitIntInfo;
10159 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10160 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
10161 break;
10162 }
10163 }
10164 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10165 return rc;
10166}
10167
10168
10169/**
10170 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10171 */
10172HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10173{
10174 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10175
10176 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
10177 hmR0VmxClearIntWindowExitVmcs(pVCpu);
10178
10179 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
10180 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
10181 return VINF_SUCCESS;
10182}
10183
10184
10185/**
10186 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10187 */
10188HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10189{
10190 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10191 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
10192 {
10193 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
10194 HMVMX_RETURN_UNEXPECTED_EXIT();
10195 }
10196
10197 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
10198
10199 /*
10200 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
10201 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
10202 */
10203 uint32_t uIntrState = 0;
10204 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10205 AssertRCReturn(rc, rc);
10206
10207 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
10208 if ( fBlockSti
10209 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
10210 {
10211 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
10212 }
10213
10214 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
10215 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
10216
10217 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
10218 return VINF_SUCCESS;
10219}
10220
10221
10222/**
10223 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10224 */
10225HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10226{
10227 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10228 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
10229 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10230}
10231
10232
10233/**
10234 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
10235 */
10236HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10237{
10238 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10239 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
10240 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10241}
10242
10243
10244/**
10245 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
10246 */
10247HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10248{
10249 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10250 PVM pVM = pVCpu->CTX_SUFF(pVM);
10251 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10252 if (RT_LIKELY(rc == VINF_SUCCESS))
10253 {
10254 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10255 Assert(pVmxTransient->cbInstr == 2);
10256 }
10257 else
10258 {
10259 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
10260 rc = VERR_EM_INTERPRETER;
10261 }
10262 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
10263 return rc;
10264}
10265
10266
10267/**
10268 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
10269 */
10270HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10271{
10272 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10273 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10274 AssertRCReturn(rc, rc);
10275
10276 if (pMixedCtx->cr4 & X86_CR4_SMXE)
10277 return VINF_EM_RAW_EMULATE_INSTR;
10278
10279 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
10280 HMVMX_RETURN_UNEXPECTED_EXIT();
10281}
10282
10283
10284/**
10285 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10286 */
10287HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10288{
10289 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10290 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10291 AssertRCReturn(rc, rc);
10292
10293 PVM pVM = pVCpu->CTX_SUFF(pVM);
10294 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10295 if (RT_LIKELY(rc == VINF_SUCCESS))
10296 {
10297 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10298 Assert(pVmxTransient->cbInstr == 2);
10299 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10300 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10301 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10302 }
10303 else
10304 rc = VERR_EM_INTERPRETER;
10305 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10306 return rc;
10307}
10308
10309
10310/**
10311 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10312 */
10313HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10314{
10315 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10316 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10317 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
10318 AssertRCReturn(rc, rc);
10319
10320 PVM pVM = pVCpu->CTX_SUFF(pVM);
10321 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
10322 if (RT_LIKELY(rc == VINF_SUCCESS))
10323 {
10324 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10325 Assert(pVmxTransient->cbInstr == 3);
10326 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10327 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10328 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10329 }
10330 else
10331 {
10332 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
10333 rc = VERR_EM_INTERPRETER;
10334 }
10335 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10336 return rc;
10337}
10338
10339
10340/**
10341 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10342 */
10343HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10344{
10345 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10346 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10347 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
10348 AssertRCReturn(rc, rc);
10349
10350 PVM pVM = pVCpu->CTX_SUFF(pVM);
10351 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10352 if (RT_LIKELY(rc == VINF_SUCCESS))
10353 {
10354 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10355 Assert(pVmxTransient->cbInstr == 2);
10356 }
10357 else
10358 {
10359 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
10360 rc = VERR_EM_INTERPRETER;
10361 }
10362 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
10363 return rc;
10364}
10365
10366
10367/**
10368 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
10369 */
10370HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10371{
10372 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10373 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
10374
10375 if (pVCpu->hm.s.fHypercallsEnabled)
10376 {
10377#if 0
10378 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10379 AssertRCReturn(rc, rc);
10380#else
10381 /* Aggressive state sync. for now. */
10382 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10383 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). */
10384#endif
10385 AssertRCReturn(rc, rc);
10386
10387 rc = GIMHypercall(pVCpu, pMixedCtx);
10388 if (RT_SUCCESS(rc))
10389 {
10390 /* If the hypercall changes anything other than guest general-purpose registers,
10391 we would need to reload the guest changed bits here before VM-reentry. */
10392 hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10393 return VINF_SUCCESS;
10394 }
10395 }
10396
10397 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10398 return VINF_SUCCESS;
10399}
10400
10401
10402/**
10403 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10404 */
10405HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10406{
10407 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10408 PVM pVM = pVCpu->CTX_SUFF(pVM);
10409 Assert(!pVM->hm.s.fNestedPaging);
10410
10411 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10412 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10413 AssertRCReturn(rc, rc);
10414
10415 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
10416 rc = VBOXSTRICTRC_VAL(rc2);
10417 if (RT_LIKELY(rc == VINF_SUCCESS))
10418 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10419 else
10420 {
10421 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
10422 pVmxTransient->uExitQualification, rc));
10423 }
10424 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
10425 return rc;
10426}
10427
10428
10429/**
10430 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10431 */
10432HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10433{
10434 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10435 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10436 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10437 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10438 AssertRCReturn(rc, rc);
10439
10440 PVM pVM = pVCpu->CTX_SUFF(pVM);
10441 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10442 if (RT_LIKELY(rc == VINF_SUCCESS))
10443 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10444 else
10445 {
10446 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
10447 rc = VERR_EM_INTERPRETER;
10448 }
10449 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
10450 return rc;
10451}
10452
10453
10454/**
10455 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10456 */
10457HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10458{
10459 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10460 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10461 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10462 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10463 AssertRCReturn(rc, rc);
10464
10465 PVM pVM = pVCpu->CTX_SUFF(pVM);
10466 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10467 rc = VBOXSTRICTRC_VAL(rc2);
10468 if (RT_LIKELY( rc == VINF_SUCCESS
10469 || rc == VINF_EM_HALT))
10470 {
10471 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10472 AssertRCReturn(rc3, rc3);
10473
10474 if ( rc == VINF_EM_HALT
10475 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
10476 {
10477 rc = VINF_SUCCESS;
10478 }
10479 }
10480 else
10481 {
10482 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
10483 rc = VERR_EM_INTERPRETER;
10484 }
10485 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
10486 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
10487 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
10488 return rc;
10489}
10490
10491
10492/**
10493 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
10494 */
10495HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10496{
10497 /*
10498 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
10499 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
10500 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
10501 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
10502 */
10503 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10504 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10505 HMVMX_RETURN_UNEXPECTED_EXIT();
10506}
10507
10508
10509/**
10510 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
10511 */
10512HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10513{
10514 /*
10515 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
10516 * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL
10517 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
10518 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
10519 */
10520 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10521 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10522 HMVMX_RETURN_UNEXPECTED_EXIT();
10523}
10524
10525
10526/**
10527 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
10528 */
10529HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10530{
10531 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
10532 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10533 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10534 HMVMX_RETURN_UNEXPECTED_EXIT();
10535}
10536
10537
10538/**
10539 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
10540 */
10541HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10542{
10543 /*
10544 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
10545 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
10546 * See Intel spec. 25.3 "Other Causes of VM-exits".
10547 */
10548 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10549 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10550 HMVMX_RETURN_UNEXPECTED_EXIT();
10551}
10552
10553
10554/**
10555 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
10556 * VM-exit.
10557 */
10558HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10559{
10560 /*
10561 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
10562 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
10563 *
10564 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
10565 * See Intel spec. "23.8 Restrictions on VMX operation".
10566 */
10567 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10568 return VINF_SUCCESS;
10569}
10570
10571
10572/**
10573 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
10574 * VM-exit.
10575 */
10576HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10577{
10578 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10579 return VINF_EM_RESET;
10580}
10581
10582
10583/**
10584 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10585 */
10586HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10587{
10588 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10589 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
10590 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10591 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10592 AssertRCReturn(rc, rc);
10593
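/* HLT is a single-byte opcode (0xF4); skip it by hand rather than reading the instruction length from the VMCS. */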
10594 pMixedCtx->rip++;
10595 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10596 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
10597 rc = VINF_SUCCESS;
10598 else
10599 rc = VINF_EM_HALT;
10600
10601 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10602 if (rc != VINF_SUCCESS)
10603 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
10604 return rc;
10605}
10606
10607
10608/**
10609 * VM-exit handler for instructions that result in a #UD exception delivered to
10610 * the guest.
10611 */
10612HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10613{
10614 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10615 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10616 return VINF_SUCCESS;
10617}
10618
10619
10620/**
10621 * VM-exit handler for expiry of the VMX preemption timer.
10622 */
10623HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10624{
10625 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10626
10627 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
10628 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10629
10630 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
10631 PVM pVM = pVCpu->CTX_SUFF(pVM);
10632 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
10633 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
10634 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
10635}
10636
10637
10638/**
10639 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
10640 */
10641HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10642{
10643 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10644
10645 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10646 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
10647 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10648 AssertRCReturn(rc, rc);
10649
10650 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
10651 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
10652
10653 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
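/* Re-check whether we need to load/save the guest XCR0 around guest execution now that XSETBV may have changed it. */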
10654
10655 return VBOXSTRICTRC_TODO(rcStrict);
10656}
10657
10658
10659/**
10660 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10661 */
10662HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10663{
10664 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10665
10666 /* The guest should not invalidate the host CPU's TLBs; fall back to the interpreter. */
10667 /** @todo implement EMInterpretInvpcid() */
10668 return VERR_EM_INTERPRETER;
10669}
10670
10671
10672/**
10673 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
10674 * Error VM-exit.
10675 */
10676HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10677{
10678 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10679 AssertRCReturn(rc, rc);
10680
10681 rc = hmR0VmxCheckVmcsCtls(pVCpu);
10682 AssertRCReturn(rc, rc);
10683
10684 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
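/* uInvalidReason is one of the VMX_IGS_* diagnostic codes determined by hmR0VmxCheckGuestState(); it is only logged in strict builds below. */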
10685 NOREF(uInvalidReason);
10686
10687#ifdef VBOX_STRICT
10688 uint32_t uIntrState;
10689 HMVMXHCUINTREG uHCReg;
10690 uint64_t u64Val;
10691 uint32_t u32Val;
10692
10693 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
10694 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
10695 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
10696 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10697 AssertRCReturn(rc, rc);
10698
10699 Log4(("uInvalidReason %u\n", uInvalidReason));
10700 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
10701 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
10702 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
10703 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
10704
10705 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
10706 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
10707 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
10708 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
10709 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
10710 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
10711 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
10712 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
10713 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
10714 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
10715 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
10716 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
10717#else
10718 NOREF(pVmxTransient);
10719#endif
10720
10721 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10722 return VERR_VMX_INVALID_GUEST_STATE;
10723}
10724
10725
10726/**
10727 * VM-exit handler for VM-entry failure due to an MSR-load
10728 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
10729 */
10730HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10731{
10732 NOREF(pVmxTransient);
10733 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10734 HMVMX_RETURN_UNEXPECTED_EXIT();
10735}
10736
10737
10738/**
10739 * VM-exit handler for VM-entry failure due to a machine-check event
10740 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
10741 */
10742HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10743{
10744 NOREF(pVmxTransient);
10745 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10746 HMVMX_RETURN_UNEXPECTED_EXIT();
10747}
10748
10749
10750/**
10751 * VM-exit handler for all undefined exit reasons. Should never ever happen...
10752 * in theory.
10753 */
10754HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10755{
10756 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
10757 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
10758 return VERR_VMX_UNDEFINED_EXIT_CODE;
10759}
10760
10761
10762/**
10763 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
10764 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
10765 * Conditional VM-exit.
10766 */
10767HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10768{
10769 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10770
10771 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
10772 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
10773 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
10774 return VERR_EM_INTERPRETER;
10775 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10776 HMVMX_RETURN_UNEXPECTED_EXIT();
10777}
10778
10779
10780/**
10781 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
10782 */
10783HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10784{
10785 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10786
10787 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
10788 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
10789 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
10790 return VERR_EM_INTERPRETER;
10791 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10792 HMVMX_RETURN_UNEXPECTED_EXIT();
10793}
10794
10795
10796/**
10797 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10798 */
10799HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10800{
10801 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10802
10803 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
10804 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10805 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10806 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10807 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10808 {
10809 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10810 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10811 }
10812 AssertRCReturn(rc, rc);
10813 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
10814
10815#ifdef VBOX_STRICT
10816 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
10817 {
10818 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
10819 && pMixedCtx->ecx != MSR_K6_EFER)
10820 {
10821 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10822 HMVMX_RETURN_UNEXPECTED_EXIT();
10823 }
10824# if HC_ARCH_BITS == 64
10825 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
10826 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10827 {
10828 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10829 HMVMX_RETURN_UNEXPECTED_EXIT();
10830 }
10831# endif
10832 }
10833#endif
10834
10835 PVM pVM = pVCpu->CTX_SUFF(pVM);
10836 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10837 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
10838 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
10839 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
10840 if (RT_LIKELY(rc == VINF_SUCCESS))
10841 {
10842 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10843 Assert(pVmxTransient->cbInstr == 2);
10844 }
10845 return rc;
10846}
10847
10848
10849/**
10850 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10851 */
10852HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10853{
10854 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10855 PVM pVM = pVCpu->CTX_SUFF(pVM);
10856 int rc = VINF_SUCCESS;
10857
10858 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
10859 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10860 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10861 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10862 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10863 {
10864 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10865 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10866 }
10867 AssertRCReturn(rc, rc);
10868 Log4(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
10869
10870 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10871 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
10872 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
10873
10874 if (RT_LIKELY(rc == VINF_SUCCESS))
10875 {
10876 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10877
10878 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
10879 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
10880 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
10881 {
10882 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
10883 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
10884 * EMInterpretWrmsr() changes it. */
10885 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10886 }
10887 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
10888 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10889 else if (pMixedCtx->ecx == MSR_K6_EFER)
10890 {
10891 /*
10892 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
10893 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
10894 * the other bits as well, SCE and NXE. See @bugref{7368}.
10895 */
10896 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
10897 }
10898
10899 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
10900 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10901 {
10902 switch (pMixedCtx->ecx)
10903 {
10904 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
10905 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10906 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10907 case MSR_K8_FS_BASE: /* no break */
10908 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10909 case MSR_K6_EFER: /* already handled above */ break;
10910 default:
10911 {
10912 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10913 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10914#if HC_ARCH_BITS == 64
10915 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10916 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10917#endif
10918 break;
10919 }
10920 }
10921 }
10922#ifdef VBOX_STRICT
10923 else
10924 {
10925 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
10926 switch (pMixedCtx->ecx)
10927 {
10928 case MSR_IA32_SYSENTER_CS:
10929 case MSR_IA32_SYSENTER_EIP:
10930 case MSR_IA32_SYSENTER_ESP:
10931 case MSR_K8_FS_BASE:
10932 case MSR_K8_GS_BASE:
10933 {
10934 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10935 HMVMX_RETURN_UNEXPECTED_EXIT();
10936 }
10937
10938 /* Writes to MSRs in the auto-load/store area or to swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
10939 default:
10940 {
10941 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10942 {
10943 /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
10944 if (pMixedCtx->ecx != MSR_K6_EFER)
10945 {
10946 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
10947 pMixedCtx->ecx));
10948 HMVMX_RETURN_UNEXPECTED_EXIT();
10949 }
10950 }
10951
10952#if HC_ARCH_BITS == 64
10953 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10954 {
10955 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10956 HMVMX_RETURN_UNEXPECTED_EXIT();
10957 }
10958#endif
10959 break;
10960 }
10961 }
10962 }
10963#endif /* VBOX_STRICT */
10964 }
10965 return rc;
10966}
10967
10968
10969/**
10970 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10971 */
10972HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10973{
10974 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10975
10976 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
10977 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
10978 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
10979 return VERR_EM_INTERPRETER;
10980 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10981 HMVMX_RETURN_UNEXPECTED_EXIT();
10982}
10983
10984
10985/**
10986 * VM-exit handler for when the TPR value is lowered below the specified
10987 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
10988 */
10989HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10990{
10991 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10992 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
10993
10994 /*
10995 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
10996 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
10997 * resume guest execution.
10998 */
10999 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11000 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
11001 return VINF_SUCCESS;
11002}
11003
11004
11005/**
11006 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
11007 * VM-exit.
11008 *
11009 * @retval VINF_SUCCESS when guest execution can continue.
11010 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
11011 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
11012 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
11013 * interpreter.
11014 */
11015HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11016{
11017 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11018 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
11019 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11020 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11021 AssertRCReturn(rc, rc);
11022
11023 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
11024 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
11025 PVM pVM = pVCpu->CTX_SUFF(pVM);
11026 VBOXSTRICTRC rcStrict;
11027 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
11028 switch (uAccessType)
11029 {
11030 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
11031 {
11032 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11033 AssertRCReturn(rc, rc);
11034
11035 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
11036 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
11037 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
11038 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE
11039 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11040 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
11041 {
11042 case 0: /* CR0 */
11043 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11044 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
11045 break;
11046 case 2: /* CR2 */
11047 /* Nothing to do here; CR2 is not part of the VMCS. */
11048 break;
11049 case 3: /* CR3 */
11050 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
11051 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
11052 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
11053 break;
11054 case 4: /* CR4 */
11055 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
11056 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n",
11057 VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
11058 break;
11059 case 8: /* CR8 */
11060 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
11061 /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */
11062 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11063 break;
11064 default:
11065 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
11066 break;
11067 }
11068
11069 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
11070 break;
11071 }
11072
11073 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
11074 {
11075 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11076 AssertRCReturn(rc, rc);
11077
11078 Assert( !pVM->hm.s.fNestedPaging
11079 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
11080 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
11081
11082 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
11083 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
11084 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
11085
11086 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
11087 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
11088 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
11089 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11090 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
11091 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
11092 VBOXSTRICTRC_VAL(rcStrict)));
11093 break;
11094 }
11095
11096 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
11097 {
11098 AssertRCReturn(rc, rc);
11099 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
11100 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11101 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11102 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
11103 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
11104 break;
11105 }
11106
11107 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
11108 {
11109 AssertRCReturn(rc, rc);
11110 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
11111 VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
11112 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11113 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
11114 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
11115 break;
11116 }
11117
11118 default:
11119 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
11120 VERR_VMX_UNEXPECTED_EXCEPTION);
11121 }
11122
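/* If IEM raised an exception (VINF_IEM_RAISED_XCPT) it has likely modified more than RIP/RFLAGS, hence the whole guest state is flagged as changed. */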
11123 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
11124 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
11125 return VBOXSTRICTRC_TODO(rcStrict);
11126}
11127
11128
11129/**
11130 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
11131 * VM-exit.
11132 */
11133HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11134{
11135 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11136 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
11137
11138 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11139 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11140 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
11141 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
11142 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
11143 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
11144 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
11145 AssertRCReturn(rc2, rc2);
11146
11147 /* See Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
11148 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
11149 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
11150 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
11151 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
11152 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
11153 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
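/* Remember whether the guest was single-stepping (RFLAGS.TF); for successful non-string I/O we queue a #DB further down to preserve single-step behaviour. */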
11154 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
11155
11156 /* I/O operation lookup arrays. */
11157 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
11158 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
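/* The index is the exit-qualification I/O width encoding: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is invalid (see the AssertReturn above). */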
11159
11160 VBOXSTRICTRC rcStrict;
11161 uint32_t const cbValue = s_aIOSizes[uIOWidth];
11162 uint32_t const cbInstr = pVmxTransient->cbInstr;
11163 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
11164 PVM pVM = pVCpu->CTX_SUFF(pVM);
11165 if (fIOString)
11166 {
11167#if 0 /* Not yet ready. Causes IEM guru meditations with a Debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158} */
11168 /*
11169 * INS/OUTS - I/O String instruction.
11170 *
11171 * Use instruction-information if available, otherwise fall back on
11172 * interpreting the instruction.
11173 */
11174 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
11175 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
11176 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
11177 {
11178 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
11179 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
11180 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11181 AssertRCReturn(rc2, rc2);
11182 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
11183 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
11184 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
11185 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
11186 if (fIOWrite)
11187 {
11188 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
11189 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
11190 }
11191 else
11192 {
11193 /*
11194 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
11195 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
11196 * See Intel Instruction spec. for "INS".
11197 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
11198 */
11199 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
11200 }
11201 }
11202 else
11203 {
11204 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
11205 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11206 AssertRCReturn(rc2, rc2);
11207 rcStrict = IEMExecOne(pVCpu);
11208 }
11209 /** @todo IEM needs to be setting these flags somehow. */
11210 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11211 fUpdateRipAlready = true;
11212#else
11213 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11214 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
11215 if (RT_SUCCESS(rcStrict))
11216 {
11217 if (fIOWrite)
11218 {
11219 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11220 (DISCPUMODE)pDis->uAddrMode, cbValue);
11221 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
11222 }
11223 else
11224 {
11225 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11226 (DISCPUMODE)pDis->uAddrMode, cbValue);
11227 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
11228 }
11229 }
11230 else
11231 {
11232 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
11233 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
11234 }
11235#endif
11236 }
11237 else
11238 {
11239 /*
11240 * IN/OUT - I/O instruction.
11241 */
11242 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
11243 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
11244 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
11245 if (fIOWrite)
11246 {
11247 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
11248 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11249 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11250 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
11251 }
11252 else
11253 {
11254 uint32_t u32Result = 0;
11255 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
11256 if (IOM_SUCCESS(rcStrict))
11257 {
11258 /* Save result of I/O IN instr. in AL/AX/EAX. */
11259 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
11260 }
11261 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11262 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11263 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
11264 }
11265 }
11266
11267 if (IOM_SUCCESS(rcStrict))
11268 {
11269 if (!fUpdateRipAlready)
11270 {
11271 pMixedCtx->rip += cbInstr;
11272 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11273 }
11274
11275 /*
11276 * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru meditation while booting a Fedora 17 64-bit guest.
11277 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
11278 */
11279 if (fIOString)
11280 {
11281 /** @todo Single-step for INS/OUTS with REP prefix? */
11282 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
11283 }
11284 else if (fStepping)
11285 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11286
11287 /*
11288 * If any I/O breakpoints are armed, we need to check if one triggered
11289 * and take appropriate action.
11290 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
11291 */
11292 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11293 AssertRCReturn(rc2, rc2);
11294
11295 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
11296 * execution engines about whether hyper BPs and such are pending. */
11297 uint32_t const uDr7 = pMixedCtx->dr[7];
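/* Snapshot DR7 so we can tell below whether DBGFBpCheckIo() modified it and HM_CHANGED_GUEST_DEBUG needs to be set. */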
11298 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
11299 && X86_DR7_ANY_RW_IO(uDr7)
11300 && (pMixedCtx->cr4 & X86_CR4_DE))
11301 || DBGFBpIsHwIoArmed(pVM)))
11302 {
11303 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
11304
11305 /* We're playing with the host CPU state here; make sure we don't preempt or longjmp. */
11306 VMMRZCallRing3Disable(pVCpu);
11307 HM_DISABLE_PREEMPT();
11308
11309 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
11310
11311 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
11312 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
11313 {
11314 /* Raise #DB. */
11315 if (fIsGuestDbgActive)
11316 ASMSetDR6(pMixedCtx->dr[6]);
11317 if (pMixedCtx->dr[7] != uDr7)
11318 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11319
11320 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
11321 }
11322 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
11323 else if ( rcStrict2 != VINF_SUCCESS
11324 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
11325 rcStrict = rcStrict2;
11326
11327 HM_RESTORE_PREEMPT();
11328 VMMRZCallRing3Enable(pVCpu);
11329 }
11330 }
11331
11332#ifdef DEBUG
11333 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11334 Assert(!fIOWrite);
11335 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11336 Assert(fIOWrite);
11337 else
11338 {
11339 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
11340 * statuses, that the VMM device and some others may return. See
11341 * IOM_SUCCESS() for guidance. */
11342 AssertMsg( RT_FAILURE(rcStrict)
11343 || rcStrict == VINF_SUCCESS
11344 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
11345 || rcStrict == VINF_EM_DBG_BREAKPOINT
11346 || rcStrict == VINF_EM_RAW_GUEST_TRAP
11347 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11348 }
11349#endif
11350
11351 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
11352 return VBOXSTRICTRC_TODO(rcStrict);
11353}
11354
11355
11356/**
11357 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
11358 * VM-exit.
11359 */
11360HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11361{
11362 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11363
11364 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
11365 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11366 AssertRCReturn(rc, rc);
11367 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
11368 {
11369 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
11370 AssertRCReturn(rc, rc);
11371 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
11372 {
11373 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
11374
11375 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
11376 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
11377
11378 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
11379 Assert(!pVCpu->hm.s.Event.fPending);
11380 pVCpu->hm.s.Event.fPending = true;
11381 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
11382 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
11383 AssertRCReturn(rc, rc);
11384 if (fErrorCodeValid)
11385 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
11386 else
11387 pVCpu->hm.s.Event.u32ErrCode = 0;
11388 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
11389 && uVector == X86_XCPT_PF)
11390 {
11391 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
11392 }
11393
11394 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
11395 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11396 return VINF_EM_RAW_INJECT_TRPM_EVENT;
11397 }
11398 }
11399
11400 /** @todo Emulate task switch someday, currently just going back to ring-3 for
11401 * emulation. */
11402 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11403 return VERR_EM_INTERPRETER;
11404}
11405
11406
11407/**
11408 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
11409 */
11410HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11411{
11412 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
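    /*
     * We only get this VM-exit when the monitor-trap-flag control is set (asserted below); clear it again
     * and report the completed single-step to the debugger by returning VINF_EM_DBG_STEPPED.
     */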
11413 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
11414 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
11415 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11416 AssertRCReturn(rc, rc);
11417 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
11418 return VINF_EM_DBG_STEPPED;
11419}
11420
11421
11422/**
11423 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
11424 */
11425HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11426{
11427 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11428
11429 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11430 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11431 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11432 {
11433 if (rc == VINF_HM_DOUBLE_FAULT)
11434 rc = VINF_SUCCESS;
11435 return rc;
11436 }
11437
11438#if 0
11439 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
11440 * just sync the whole thing. */
11441 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11442#else
11443 /* Aggressive state sync. for now. */
11444 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11445 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11446 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11447#endif
11448 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11449 AssertRCReturn(rc, rc);
11450
11451 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
11452 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
11453 switch (uAccessType)
11454 {
11455 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
11456 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
11457 {
11458 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
11459 || VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != 0x80,
11460 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
11461
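            /*
             * Compute the guest-physical address being accessed (APIC base page + the access offset from the
             * exit qualification) and let IOM dispatch it to the MMIO handler as if an MMIO #PF had occurred.
             */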
11462 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
11463 GCPhys &= PAGE_BASE_GC_MASK;
11464 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
11465 PVM pVM = pVCpu->CTX_SUFF(pVM);
11466 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGv Off=%#x\n", uAccessType, GCPhys,
11467 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
11468
11469 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
11470 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
11471 CPUMCTX2CORE(pMixedCtx), GCPhys);
11472 rc = VBOXSTRICTRC_VAL(rc2);
11473 Log4(("ApicAccess rc=%d\n", rc));
11474 if ( rc == VINF_SUCCESS
11475 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11476 || rc == VERR_PAGE_NOT_PRESENT)
11477 {
11478 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11479 | HM_CHANGED_GUEST_RSP
11480 | HM_CHANGED_GUEST_RFLAGS
11481 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11482 rc = VINF_SUCCESS;
11483 }
11484 break;
11485 }
11486
11487 default:
11488 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
11489 rc = VINF_EM_RAW_EMULATE_INSTR;
11490 break;
11491 }
11492
11493 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
11494 if (rc != VINF_SUCCESS)
11495 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
11496 return rc;
11497}
11498
11499
11500/**
11501 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
11502 * VM-exit.
11503 */
11504HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11505{
11506 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11507
11508 /* We should -not- get this VM-exit if the guest's debug registers were active. */
11509 if (pVmxTransient->fWasGuestDebugStateActive)
11510 {
11511 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11512 HMVMX_RETURN_UNEXPECTED_EXIT();
11513 }
11514
11515 int rc = VERR_INTERNAL_ERROR_5;
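    /*
     * Fast path: if nobody is single-stepping and the hypervisor debug state isn't active, stop intercepting
     * MOV DRx (and #DB when not in real-on-V86 mode), load the guest debug state onto the CPU and simply
     * re-execute the instruction. Otherwise fall through and interpret the access below.
     */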
11516 if ( !DBGFIsStepping(pVCpu)
11517 && !pVCpu->hm.s.fSingleInstruction
11518 && !pVmxTransient->fWasHyperDebugStateActive)
11519 {
11520 /* Don't intercept MOV DRx and #DB any more. */
11521 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
11522 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11523 AssertRCReturn(rc, rc);
11524
11525 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11526 {
11527#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11528 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
11529 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
11530#endif
11531 }
11532
11533 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11534 VMMRZCallRing3Disable(pVCpu);
11535 HM_DISABLE_PREEMPT();
11536
11537 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
11538 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
11539 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
11540
11541 HM_RESTORE_PREEMPT();
11542 VMMRZCallRing3Enable(pVCpu);
11543
11544#ifdef VBOX_WITH_STATISTICS
11545 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11546 AssertRCReturn(rc, rc);
11547 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11548 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11549 else
11550 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11551#endif
11552 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
11553 return VINF_SUCCESS;
11554 }
11555
11556 /*
11557 * EMInterpretDRxWrite()/EMInterpretDRxRead() call CPUMIsGuestIn64BitCode() which requires EFER and CS. EFER is
11558 * always up-to-date; update the segment registers and DR7 from the CPU.
11559 */
11560 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11561 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11562 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11563 AssertRCReturn(rc, rc);
11564 Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11565
11566 PVM pVM = pVCpu->CTX_SUFF(pVM);
11567 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11568 {
11569 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11570 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
11571 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
11572 if (RT_SUCCESS(rc))
11573 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11574 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11575 }
11576 else
11577 {
11578 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11579 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
11580 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
11581 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11582 }
11583
11584 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
11585 if (RT_SUCCESS(rc))
11586 {
11587 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11588 AssertRCReturn(rc2, rc2);
11589 }
11590 return rc;
11591}
11592
11593
11594/**
11595 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11596 * Conditional VM-exit.
11597 */
11598HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11599{
11600 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11601 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11602
11603 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11604 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11605 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11606 {
11607 if (rc == VINF_HM_DOUBLE_FAULT)
11608 rc = VINF_SUCCESS;
11609 return rc;
11610 }
11611
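    /* Get the guest-physical address that triggered the EPT misconfiguration from the VMCS. */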
11612 RTGCPHYS GCPhys = 0;
11613 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11614
11615#if 0
11616 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11617#else
11618 /* Aggressive state sync. for now. */
11619 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11620 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11621 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11622#endif
11623 AssertRCReturn(rc, rc);
11624
11625 /*
11626 * If we succeed, resume guest execution.
11627 * If we fail to interpret the instruction because we could not get the guest physical address of the
11628 * page containing the instruction via the guest's page tables (we would invalidate the guest page in the
11629 * host TLB), resume execution anyway; the resulting guest page fault lets the guest handle this weird
11630 * case. See @bugref{6043}.
11631 */
11632 PVM pVM = pVCpu->CTX_SUFF(pVM);
11633 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
11634 rc = VBOXSTRICTRC_VAL(rc2);
11635 Log4(("EPT misconfig at %#RGv RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
11636 if ( rc == VINF_SUCCESS
11637 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11638 || rc == VERR_PAGE_NOT_PRESENT)
11639 {
11640 /* Successfully handled MMIO operation. */
11641 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11642 | HM_CHANGED_GUEST_RSP
11643 | HM_CHANGED_GUEST_RFLAGS
11644 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11645 rc = VINF_SUCCESS;
11646 }
11647 return rc;
11648}
11649
11650
11651/**
11652 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
11653 * VM-exit.
11654 */
11655HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11656{
11657 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11658 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11659
11660 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11661 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11662 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11663 {
11664 if (rc == VINF_HM_DOUBLE_FAULT)
11665 rc = VINF_SUCCESS;
11666 return rc;
11667 }
11668
11669 RTGCPHYS GCPhys = 0;
11670 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11671 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11672#if 0
11673 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11674#else
11675 /* Aggressive state sync. for now. */
11676 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11677 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11678 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11679#endif
11680 AssertRCReturn(rc, rc);
11681
11682 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
11683 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
11684
11685 RTGCUINT uErrorCode = 0;
11686 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
11687 uErrorCode |= X86_TRAP_PF_ID;
11688 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
11689 uErrorCode |= X86_TRAP_PF_RW;
11690 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
11691 uErrorCode |= X86_TRAP_PF_P;
11692
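    /* Hand the violation to PGM as a nested-paging #PF using the error-code bits derived above. */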
11693 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
11694
11695 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
11696 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11697
11698 /* Handle the page-fault trap for the nested shadow table. */
11699 PVM pVM = pVCpu->CTX_SUFF(pVM);
11700 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
11701 TRPMResetTrap(pVCpu);
11702
11703 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
11704 if ( rc == VINF_SUCCESS
11705 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11706 || rc == VERR_PAGE_NOT_PRESENT)
11707 {
11708 /* Successfully synced our nested page tables. */
11709 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
11710 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11711 | HM_CHANGED_GUEST_RSP
11712 | HM_CHANGED_GUEST_RFLAGS);
11713 return VINF_SUCCESS;
11714 }
11715
11716 Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
11717 return rc;
11718}
11719
11720/** @} */
11721
11722/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11723/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
11724/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11725
11726/** @name VM-exit exception handlers.
11727 * @{
11728 */
11729
11730/**
11731 * VM-exit exception handler for #MF (Math Fault: floating point exception).
11732 */
11733static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11734{
11735 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11736 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
11737
11738 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11739 AssertRCReturn(rc, rc);
11740
11741 if (!(pMixedCtx->cr0 & X86_CR0_NE))
11742 {
11743 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
11744 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
11745
11746 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
11747 * provides VM-exit instruction length. If this causes problem later,
11748 * disassemble the instruction like it's done on AMD-V. */
11749 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11750 AssertRCReturn(rc2, rc2);
11751 return rc;
11752 }
11753
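    /* CR0.NE is set: reflect the #MF back into the guest, preserving the interruption information from the VM-exit. */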
11754 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11755 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11756 return rc;
11757}
11758
11759
11760/**
11761 * VM-exit exception handler for #BP (Breakpoint exception).
11762 */
11763static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11764{
11765 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11766 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
11767
11768 /** @todo Try optimize this by not saving the entire guest state unless
11769 * really needed. */
11770 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11771 AssertRCReturn(rc, rc);
11772
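    /* Let DBGF have first crack at the breakpoint; if it's not ours (VINF_EM_RAW_GUEST_TRAP), re-inject #BP into the guest. */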
11773 PVM pVM = pVCpu->CTX_SUFF(pVM);
11774 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11775 if (rc == VINF_EM_RAW_GUEST_TRAP)
11776 {
11777 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11778 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11779 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11780 AssertRCReturn(rc, rc);
11781
11782 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11783 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11784 }
11785
11786 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
11787 return rc;
11788}
11789
11790
11791/**
11792 * VM-exit exception handler for #DB (Debug exception).
11793 */
11794static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11795{
11796 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11797 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
11798 Log6(("XcptDB\n"));
11799
11800 /*
11801 * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
11802 * for processing.
11803 */
11804 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11805 AssertRCReturn(rc, rc);
11806
11807 /* Refer to Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
11808 uint64_t uDR6 = X86_DR6_INIT_VAL;
11809 uDR6 |= ( pVmxTransient->uExitQualification
11810 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
11811
11812 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
11813 if (rc == VINF_EM_RAW_GUEST_TRAP)
11814 {
11815 /*
11816 * The exception was for the guest. Update DR6, DR7.GD and
11817 * IA32_DEBUGCTL.LBR before forwarding it.
11818 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
11819 */
11820 VMMRZCallRing3Disable(pVCpu);
11821 HM_DISABLE_PREEMPT();
11822
11823 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
11824 pMixedCtx->dr[6] |= uDR6;
11825 if (CPUMIsGuestDebugStateActive(pVCpu))
11826 ASMSetDR6(pMixedCtx->dr[6]);
11827
11828 HM_RESTORE_PREEMPT();
11829 VMMRZCallRing3Enable(pVCpu);
11830
11831 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11832 AssertRCReturn(rc, rc);
11833
11834 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
11835 pMixedCtx->dr[7] &= ~X86_DR7_GD;
11836
11837 /* Paranoia. */
11838 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
11839 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
11840
11841 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
11842 AssertRCReturn(rc, rc);
11843
11844 /*
11845 * Raise #DB in the guest.
11846 *
11847 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
11848 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP and not the 'normal' #DB.
11849 * Thus it -may- trigger different handling in the CPU (like skipped DPL checks). See @bugref{6398}.
11850 *
11851 * Since ICEBP isn't documented on Intel, see AMD spec. 15.20 "Event Injection".
11852 */
11853 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11854 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11855 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11856 AssertRCReturn(rc, rc);
11857 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11858 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11859 return VINF_SUCCESS;
11860 }
11861
11862 /*
11863 * Not a guest trap, so this must be a hypervisor-related debug event.
11864 * Update DR6 in case someone is interested in it.
11865 */
11866 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
11867 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
11868 CPUMSetHyperDR6(pVCpu, uDR6);
11869
11870 return rc;
11871}
11872
11873
11874/**
11875 * VM-exit exception handler for #NM (Device-not-available exception: floating
11876 * point exception).
11877 */
11878static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11879{
11880 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11881
11882 /* We require CR0 and EFER. EFER is always up-to-date. */
11883 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11884 AssertRCReturn(rc, rc);
11885
11886 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
11887 VMMRZCallRing3Disable(pVCpu);
11888 HM_DISABLE_PREEMPT();
11889
11890 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
11891 if (pVmxTransient->fWasGuestFPUStateActive)
11892 {
11893 rc = VINF_EM_RAW_GUEST_TRAP;
11894 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11895 }
11896 else
11897 {
11898#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11899 Assert(!pVmxTransient->fWasGuestFPUStateActive);
11900#endif
11901 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11902 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
11903 }
11904
11905 HM_RESTORE_PREEMPT();
11906 VMMRZCallRing3Enable(pVCpu);
11907
11908 if (rc == VINF_SUCCESS)
11909 {
11910 /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
11911 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11912 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
11913 pVCpu->hm.s.fPreloadGuestFpu = true;
11914 }
11915 else
11916 {
11917 /* Forward #NM to the guest. */
11918 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
11919 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11920 AssertRCReturn(rc, rc);
11921 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11922 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
11923 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11924 }
11925
11926 return VINF_SUCCESS;
11927}
11928
11929
11930/**
11931 * VM-exit exception handler for #GP (General-protection exception).
11932 *
11933 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
11934 */
11935static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11936{
11937 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11938 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
11939
11940 int rc = VERR_INTERNAL_ERROR_5;
11941 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11942 {
11943#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11944 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
11945 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11946 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11947 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11948 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11949 AssertRCReturn(rc, rc);
11950 Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
11951 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
11952 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11953 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11954 return rc;
11955#else
11956 /* We don't intercept #GP. */
11957 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
11958 NOREF(pVmxTransient);
11959 return VERR_VMX_UNEXPECTED_EXCEPTION;
11960#endif
11961 }
11962
11963 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11964 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
11965
11966 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
11967 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11968 AssertRCReturn(rc, rc);
11969
11970 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11971 uint32_t cbOp = 0;
11972 PVM pVM = pVCpu->CTX_SUFF(pVM);
11973 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
11974 if (RT_SUCCESS(rc))
11975 {
11976 rc = VINF_SUCCESS;
11977 Assert(cbOp == pDis->cbInstr);
11978 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11979 switch (pDis->pCurInstr->uOpcode)
11980 {
11981 case OP_CLI:
11982 {
11983 pMixedCtx->eflags.Bits.u1IF = 0;
11984 pMixedCtx->eflags.Bits.u1RF = 0;
11985 pMixedCtx->rip += pDis->cbInstr;
11986 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11987 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11988 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
11989 break;
11990 }
11991
11992 case OP_STI:
11993 {
11994 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
11995 pMixedCtx->eflags.Bits.u1IF = 1;
11996 pMixedCtx->eflags.Bits.u1RF = 0;
11997 pMixedCtx->rip += pDis->cbInstr;
11998 if (!fOldIF)
11999 {
12000 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
12001 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
12002 }
12003 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12004 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12005 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
12006 break;
12007 }
12008
12009 case OP_HLT:
12010 {
12011 rc = VINF_EM_HALT;
12012 pMixedCtx->rip += pDis->cbInstr;
12013 pMixedCtx->eflags.Bits.u1RF = 0;
12014 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12015 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
12016 break;
12017 }
12018
12019 case OP_POPF:
12020 {
12021 Log4(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
12022 uint32_t cbParm;
12023 uint32_t uMask;
12024 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
12025 if (pDis->fPrefix & DISPREFIX_OPSIZE)
12026 {
12027 cbParm = 4;
12028 uMask = 0xffffffff;
12029 }
12030 else
12031 {
12032 cbParm = 2;
12033 uMask = 0xffff;
12034 }
12035
12036 /* Get the stack pointer & pop the contents of the stack into EFLAGS. */
12037 RTGCPTR GCPtrStack = 0;
12038 X86EFLAGS Eflags;
12039 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
12040 &GCPtrStack);
12041 if (RT_SUCCESS(rc))
12042 {
12043 Assert(sizeof(Eflags.u32) >= cbParm);
12044 Eflags.u32 = 0;
12045 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm);
12046 }
12047 if (RT_FAILURE(rc))
12048 {
12049 rc = VERR_EM_INTERPRETER;
12050 break;
12051 }
12052 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
12053 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
12054 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
12055 pMixedCtx->esp += cbParm;
12056 pMixedCtx->esp &= uMask;
12057 pMixedCtx->rip += pDis->cbInstr;
12058 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12059 | HM_CHANGED_GUEST_RSP
12060 | HM_CHANGED_GUEST_RFLAGS);
12061 /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */
12062 if (fStepping)
12063 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12064
12065 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
12066 break;
12067 }
12068
12069 case OP_PUSHF:
12070 {
12071 uint32_t cbParm;
12072 uint32_t uMask;
12073 if (pDis->fPrefix & DISPREFIX_OPSIZE)
12074 {
12075 cbParm = 4;
12076 uMask = 0xffffffff;
12077 }
12078 else
12079 {
12080 cbParm = 2;
12081 uMask = 0xffff;
12082 }
12083
12084 /* Get the stack pointer & push the contents of eflags onto the stack. */
12085 RTGCPTR GCPtrStack = 0;
12086 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
12087 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
12088 if (RT_FAILURE(rc))
12089 {
12090 rc = VERR_EM_INTERPRETER;
12091 break;
12092 }
12093 X86EFLAGS Eflags = pMixedCtx->eflags;
12094 /* The RF & VM bits are cleared in the EFLAGS image stored on the stack; see Intel instruction reference for PUSHF. */
12095 Eflags.Bits.u1RF = 0;
12096 Eflags.Bits.u1VM = 0;
12097
12098 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm);
12099 if (RT_FAILURE(rc))
12100 {
12101 rc = VERR_EM_INTERPRETER;
12102 break;
12103 }
12104 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
12105 pMixedCtx->esp -= cbParm;
12106 pMixedCtx->esp &= uMask;
12107 pMixedCtx->rip += pDis->cbInstr;
12108 pMixedCtx->eflags.Bits.u1RF = 0;
12109 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12110 | HM_CHANGED_GUEST_RSP
12111 | HM_CHANGED_GUEST_RFLAGS);
12112 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12113 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
12114 break;
12115 }
12116
12117 case OP_IRET:
12118 {
12119 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
12120 * instruction reference. */
12121 RTGCPTR GCPtrStack = 0;
12122 uint32_t uMask = 0xffff;
12123 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
12124 uint16_t aIretFrame[3];
12125 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
12126 {
12127 rc = VERR_EM_INTERPRETER;
12128 break;
12129 }
12130 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
12131 &GCPtrStack);
12132 if (RT_SUCCESS(rc))
12133 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
12134 if (RT_FAILURE(rc))
12135 {
12136 rc = VERR_EM_INTERPRETER;
12137 break;
12138 }
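                /* Pop IP, CS and FLAGS from the 16-bit IRET frame; in real mode the CS base is simply the selector shifted left by 4. */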
12139 pMixedCtx->eip = 0;
12140 pMixedCtx->ip = aIretFrame[0];
12141 pMixedCtx->cs.Sel = aIretFrame[1];
12142 pMixedCtx->cs.ValidSel = aIretFrame[1];
12143 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
12144 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
12145 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
12146 pMixedCtx->sp += sizeof(aIretFrame);
12147 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12148 | HM_CHANGED_GUEST_SEGMENT_REGS
12149 | HM_CHANGED_GUEST_RSP
12150 | HM_CHANGED_GUEST_RFLAGS);
12151 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
12152 if (fStepping)
12153 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12154 Log4(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
12155 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
12156 break;
12157 }
12158
12159 case OP_INT:
12160 {
12161 uint16_t uVector = pDis->Param1.uValue & 0xff;
12162 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
12163 /* INT clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
12164 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12165 break;
12166 }
12167
12168 case OP_INTO:
12169 {
12170 if (pMixedCtx->eflags.Bits.u1OF)
12171 {
12172 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
12173 /* INTO clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
12174 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12175 }
12176 else
12177 {
12178 pMixedCtx->eflags.Bits.u1RF = 0;
12179 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
12180 }
12181 break;
12182 }
12183
12184 default:
12185 {
12186 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
12187 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
12188 EMCODETYPE_SUPERVISOR);
12189 rc = VBOXSTRICTRC_VAL(rc2);
12190 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
12191 /** @todo We have to set pending-debug exceptions here when the guest is
12192 * single-stepping depending on the instruction that was interpreted. */
12193 Log4(("#GP rc=%Rrc\n", rc));
12194 break;
12195 }
12196 }
12197 }
12198 else
12199 rc = VERR_EM_INTERPRETER;
12200
12201 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
12202 ("#GP Unexpected rc=%Rrc\n", rc));
12203 return rc;
12204}
12205
12206
12207#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
12208/**
12209 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
12210 * the exception reported in the VMX transient structure back into the VM.
12211 *
12212 * @remarks Requires uExitIntInfo in the VMX transient structure to be
12213 * up-to-date.
12214 */
12215static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12216{
12217 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12218
12219 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
12220 hmR0VmxCheckExitDueToEventDelivery(). */
12221 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12222 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12223 AssertRCReturn(rc, rc);
12224 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
12225
12226#ifdef DEBUG_ramshankar
12227 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12228 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
12229 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));
12230#endif
12231
12232 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12233 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12234 return VINF_SUCCESS;
12235}
12236#endif
12237
12238
12239/**
12240 * VM-exit exception handler for #PF (Page-fault exception).
12241 */
12242static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12243{
12244 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12245 PVM pVM = pVCpu->CTX_SUFF(pVM);
12246 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12247 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12248 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12249 AssertRCReturn(rc, rc);
12250
12251#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
12252 if (pVM->hm.s.fNestedPaging)
12253 {
12254 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
12255 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
12256 {
12257 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12258 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12259 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
12260 }
12261 else
12262 {
12263 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12264 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12265 Log4(("Pending #DF due to vectoring #PF. NP\n"));
12266 }
12267 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12268 return rc;
12269 }
12270#else
12271 Assert(!pVM->hm.s.fNestedPaging);
12272 NOREF(pVM);
12273#endif
12274
12275 /* If it's a vectoring #PF, emulate injecting the original event, as PGMTrap0eHandler() is incapable
12276 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
12277 if (pVmxTransient->fVectoringPF)
12278 {
12279 Assert(pVCpu->hm.s.Event.fPending);
12280 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12281 }
12282
12283 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12284 AssertRCReturn(rc, rc);
12285
12286 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
12287 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
12288
12289 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
12290 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
12291 (RTGCPTR)pVmxTransient->uExitQualification);
12292
12293 Log4(("#PF: rc=%Rrc\n", rc));
12294 if (rc == VINF_SUCCESS)
12295 {
12296 /* Successfully synced shadow page tables or emulated an MMIO instruction. */
12297 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
12298 * memory? We don't update the whole state here... */
12299 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12300 | HM_CHANGED_GUEST_RSP
12301 | HM_CHANGED_GUEST_RFLAGS
12302 | HM_CHANGED_VMX_GUEST_APIC_STATE);
12303 TRPMResetTrap(pVCpu);
12304 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
12305 return rc;
12306 }
12307
12308 if (rc == VINF_EM_RAW_GUEST_TRAP)
12309 {
12310 if (!pVmxTransient->fVectoringDoublePF)
12311 {
12312 /* It's a guest page fault and needs to be reflected to the guest. */
12313 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
12314 TRPMResetTrap(pVCpu);
12315 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
12316 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12317 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12318 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
12319 }
12320 else
12321 {
12322 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12323 TRPMResetTrap(pVCpu);
12324 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
12325 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12326 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
12327 }
12328
12329 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12330 return VINF_SUCCESS;
12331 }
12332
12333 TRPMResetTrap(pVCpu);
12334 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
12335 return rc;
12336}
12337
12338/** @} */
12339