VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp @ 56874

Last change on this file since 56874 was 56837, checked in by vboxsync, 10 years ago

VMM/HMVMXR0, HMSVMR0: Disable currently dead code path.

1/* $Id: HMVMXR0.cpp 56837 2015-07-07 11:46:09Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/x86.h>
23#include <iprt/asm-amd64-x86.h>
24#include <iprt/thread.h>
25
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/dbgf.h>
28#include <VBox/vmm/iem.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/selm.h>
31#include <VBox/vmm/tm.h>
32#include <VBox/vmm/gim.h>
33#ifdef VBOX_WITH_REM
34# include <VBox/vmm/rem.h>
35#endif
36#include "HMInternal.h"
37#include <VBox/vmm/vm.h>
38#include "HMVMXR0.h"
39#include "dtrace/VBoxVMM.h"
40
41#ifdef DEBUG_ramshankar
42# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
43# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
44# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
45# define HMVMX_ALWAYS_CHECK_GUEST_STATE
46# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
47# define HMVMX_ALWAYS_TRAP_PF
48# define HMVMX_ALWAYS_SWAP_FPU_STATE
49# define HMVMX_ALWAYS_FLUSH_TLB
50# define HMVMX_ALWAYS_SWAP_EFER
51#endif
52
53
54/*******************************************************************************
55* Defined Constants And Macros *
56*******************************************************************************/
57#if defined(RT_ARCH_AMD64)
58# define HMVMX_IS_64BIT_HOST_MODE() (true)
59typedef RTHCUINTREG HMVMXHCUINTREG;
60#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
61extern "C" uint32_t g_fVMXIs64bitHost;
62# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
63typedef uint64_t HMVMXHCUINTREG;
64#else
65# define HMVMX_IS_64BIT_HOST_MODE() (false)
66typedef RTHCUINTREG HMVMXHCUINTREG;
67#endif
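/* Note: HMVMXHCUINTREG must be wide enough to hold host VMCS field values. On a
   hybrid 32-bit kernel (32-bit ring-0 running on a 64-bit capable CPU) VT-x itself
   operates in 64-bit mode, so 64 bits are needed even though RTHCUINTREG is only
   32 bits wide in that configuration. */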
68
69/** Use the function table. */
70#define HMVMX_USE_FUNCTION_TABLE
71
72/** Determine which tagged-TLB flush handler to use. */
73#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
74#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
75#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
76#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
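/* These constants select, at setup time, which tagged-TLB flush routine a VCPU
   uses before VM-entry: EPT+VPID, EPT only, VPID only, or neither. */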
77
78/** @name Updated-guest-state flags.
79 * @{ */
80#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
81#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
82#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
83#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
84#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
85#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
86#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
87#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
88#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
89#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
90#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
91#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
92#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12)
93#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13)
94#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14)
95#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15)
96#define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16)
97#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17)
98#define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18)
99#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
100#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
101 | HMVMX_UPDATED_GUEST_RSP \
102 | HMVMX_UPDATED_GUEST_RFLAGS \
103 | HMVMX_UPDATED_GUEST_CR0 \
104 | HMVMX_UPDATED_GUEST_CR3 \
105 | HMVMX_UPDATED_GUEST_CR4 \
106 | HMVMX_UPDATED_GUEST_GDTR \
107 | HMVMX_UPDATED_GUEST_IDTR \
108 | HMVMX_UPDATED_GUEST_LDTR \
109 | HMVMX_UPDATED_GUEST_TR \
110 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
111 | HMVMX_UPDATED_GUEST_DEBUG \
112 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
113 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
114 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
115 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
116 | HMVMX_UPDATED_GUEST_LAZY_MSRS \
117 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
118 | HMVMX_UPDATED_GUEST_INTR_STATE \
119 | HMVMX_UPDATED_GUEST_APIC_STATE)
120/** @} */
121
122/** @name Updated-transient flags.
123 * Flags to skip redundant reads of some common VMCS fields that are not part of
124 * the guest-CPU state but are in the transient structure.
125 * @{ */
126#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
127#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
128#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
129#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
130#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
131#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
132#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
133/** @} */
134
135/** @name States of the VMCS.
136 *
137 * This does not reflect all possible VMCS states but currently only those
138 * needed for maintaining the VMCS consistently even when thread-context hooks
139 * are used. This may be extended later (e.g. for nested virtualization).
140 * @{
141 */
142#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
143#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
144#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
145/** @} */
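/* Typical VMCS lifecycle: CLEAR after VMCLEAR, ACTIVE (and current) after VMPTRLD,
   and LAUNCHED after a successful VMLAUNCH; thereafter the VMCS is re-entered with
   VMRESUME until it is cleared again. */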
146
147/**
148 * Exception bitmap mask for real-mode guests (real-on-v86).
149 *
150 * We need to intercept all exceptions manually except:
151 * - #NM, #MF handled in hmR0VmxLoadSharedCR0().
152 * - #DB handled in hmR0VmxLoadSharedDebugState().
153 * - #PF need not be intercepted even in real-mode if we have Nested Paging
154 * support.
155 */
156#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
157 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
158 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
159 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
160 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
161 /* RT_BIT(X86_XCPT_MF) */ | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
162 | RT_BIT(X86_XCPT_XF))
163
164/**
165 * Exception bitmap mask for all contributory exceptions.
166 *
167 * Page fault is deliberately excluded here as it's conditional as to whether
168 * it's contributory or benign. Page faults are handled separately.
169 */
170#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
171 | RT_BIT(X86_XCPT_DE))
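/* Per the IA-32 benign/contributory classification, a contributory exception raised
   while delivering another contributory exception results in a double fault (#DF);
   this mask is used when deciding whether a vectoring event must be converted into #DF. */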
172
173/** Maximum VM-instruction error number. */
174#define HMVMX_INSTR_ERROR_MAX 28
175
176/** Profiling macro. */
177#ifdef HM_PROFILE_EXIT_DISPATCH
178# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
179# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
180#else
181# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
182# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
183#endif
184
185/** Assert that preemption is disabled or covered by thread-context hooks. */
186#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
187 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
188
189/** Assert that we haven't migrated CPUs when thread-context hooks are not
190 * used. */
191#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
192 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
193 ("Illegal migration! Entered on CPU %u Current %u\n", \
194 pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
195
196/** Helper macro for VM-exit handlers called unexpectedly. */
197#define HMVMX_RETURN_UNEXPECTED_EXIT() \
198 do { \
199 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
200 return VERR_VMX_UNEXPECTED_EXIT; \
201 } while (0)
202
203
204/*******************************************************************************
205* Structures and Typedefs *
206*******************************************************************************/
207/**
208 * VMX transient state.
209 *
210 * A state structure for holding miscellaneous information across
211 * VMX non-root operation and restored after the transition.
212 */
213typedef struct VMXTRANSIENT
214{
215 /** The host's rflags/eflags. */
216 RTCCUINTREG fEFlags;
217#if HC_ARCH_BITS == 32
218 uint32_t u32Alignment0;
219#endif
220 /** The guest's TPR value used for TPR shadowing. */
221 uint8_t u8GuestTpr;
222 /** Alignment. */
223 uint8_t abAlignment0[7];
224
225 /** The basic VM-exit reason. */
226 uint16_t uExitReason;
227 /** Alignment. */
228 uint16_t u16Alignment0;
229 /** The VM-exit interruption error code. */
230 uint32_t uExitIntErrorCode;
231 /** The VM-exit exit qualification. */
232 uint64_t uExitQualification;
233
234 /** The VM-exit interruption-information field. */
235 uint32_t uExitIntInfo;
236 /** The VM-exit instruction-length field. */
237 uint32_t cbInstr;
238 /** The VM-exit instruction-information field. */
239 union
240 {
241 /** Plain unsigned int representation. */
242 uint32_t u;
243 /** INS and OUTS information. */
244 struct
245 {
246 uint32_t u6Reserved0 : 7;
247 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
248 uint32_t u3AddrSize : 3;
249 uint32_t u5Reserved1 : 5;
250 /** The segment register (X86_SREG_XXX). */
251 uint32_t iSegReg : 3;
252 uint32_t uReserved2 : 14;
253 } StrIo;
254 } ExitInstrInfo;
255 /** Whether the VM-entry failed or not. */
256 bool fVMEntryFailed;
257 /** Alignment. */
258 uint8_t abAlignment1[3];
259
260 /** The VM-entry interruption-information field. */
261 uint32_t uEntryIntInfo;
262 /** The VM-entry exception error code field. */
263 uint32_t uEntryXcptErrorCode;
264 /** The VM-entry instruction length field. */
265 uint32_t cbEntryInstr;
266
267 /** IDT-vectoring information field. */
268 uint32_t uIdtVectoringInfo;
269 /** IDT-vectoring error code. */
270 uint32_t uIdtVectoringErrorCode;
271
272 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
273 uint32_t fVmcsFieldsRead;
274
275 /** Whether the guest FPU was active at the time of VM-exit. */
276 bool fWasGuestFPUStateActive;
277 /** Whether the guest debug state was active at the time of VM-exit. */
278 bool fWasGuestDebugStateActive;
279 /** Whether the hyper debug state was active at the time of VM-exit. */
280 bool fWasHyperDebugStateActive;
281 /** Whether TSC-offsetting should be setup before VM-entry. */
282 bool fUpdateTscOffsettingAndPreemptTimer;
283 /** Whether the VM-exit was caused by a page-fault during delivery of a
284 * contributory exception or a page-fault. */
285 bool fVectoringDoublePF;
286 /** Whether the VM-exit was caused by a page-fault during delivery of an
287 * external interrupt or NMI. */
288 bool fVectoringPF;
289} VMXTRANSIENT;
290AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
291AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
292AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
293AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
294AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
295/** Pointer to VMX transient state. */
296typedef VMXTRANSIENT *PVMXTRANSIENT;
297
298
299/**
300 * MSR-bitmap read permissions.
301 */
302typedef enum VMXMSREXITREAD
303{
304 /** Reading this MSR causes a VM-exit. */
305 VMXMSREXIT_INTERCEPT_READ = 0xb,
306 /** Reading this MSR does not cause a VM-exit. */
307 VMXMSREXIT_PASSTHRU_READ
308} VMXMSREXITREAD;
309/** Pointer to MSR-bitmap read permissions. */
310typedef VMXMSREXITREAD *PVMXMSREXITREAD;
311
312/**
313 * MSR-bitmap write permissions.
314 */
315typedef enum VMXMSREXITWRITE
316{
317 /** Writing to this MSR causes a VM-exit. */
318 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
319 /** Writing to this MSR does not cause a VM-exit. */
320 VMXMSREXIT_PASSTHRU_WRITE
321} VMXMSREXITWRITE;
322/** Pointer to MSR-bitmap write permissions. */
323typedef VMXMSREXITWRITE *PVMXMSREXITWRITE;
324
325
326/**
327 * VMX VM-exit handler.
328 *
329 * @returns VBox status code.
330 * @param pVCpu Pointer to the VMCPU.
331 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
332 * out-of-sync. Make sure to update the required
333 * fields before using them.
334 * @param pVmxTransient Pointer to the VMX-transient structure.
335 */
336#ifndef HMVMX_USE_FUNCTION_TABLE
337typedef int FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
338#else
339typedef DECLCALLBACK(int) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
340/** Pointer to VM-exit handler. */
341typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
342#endif
343
344
345/*******************************************************************************
346* Internal Functions *
347*******************************************************************************/
348static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
349static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
350static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
351 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
352 bool fStepping, uint32_t *puIntState);
353#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
354static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
355#endif
356#ifndef HMVMX_USE_FUNCTION_TABLE
357DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
358# define HMVMX_EXIT_DECL static int
359#else
360# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
361#endif
362DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
363 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart);
364
365/** @name VM-exit handlers.
366 * @{
367 */
368static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
369static FNVMXEXITHANDLER hmR0VmxExitExtInt;
370static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
371static FNVMXEXITHANDLER hmR0VmxExitInitSignal;
372static FNVMXEXITHANDLER hmR0VmxExitSipi;
373static FNVMXEXITHANDLER hmR0VmxExitIoSmi;
374static FNVMXEXITHANDLER hmR0VmxExitSmi;
375static FNVMXEXITHANDLER hmR0VmxExitIntWindow;
376static FNVMXEXITHANDLER hmR0VmxExitNmiWindow;
377static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
378static FNVMXEXITHANDLER hmR0VmxExitCpuid;
379static FNVMXEXITHANDLER hmR0VmxExitGetsec;
380static FNVMXEXITHANDLER hmR0VmxExitHlt;
381static FNVMXEXITHANDLER hmR0VmxExitInvd;
382static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
383static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
384static FNVMXEXITHANDLER hmR0VmxExitVmcall;
385static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
386static FNVMXEXITHANDLER hmR0VmxExitRsm;
387static FNVMXEXITHANDLER hmR0VmxExitSetPendingXcptUD;
388static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
389static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
390static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
391static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
392static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
393static FNVMXEXITHANDLER hmR0VmxExitErrInvalidGuestState;
394static FNVMXEXITHANDLER hmR0VmxExitErrMsrLoad;
395static FNVMXEXITHANDLER hmR0VmxExitErrUndefined;
396static FNVMXEXITHANDLER hmR0VmxExitMwait;
397static FNVMXEXITHANDLER hmR0VmxExitMtf;
398static FNVMXEXITHANDLER hmR0VmxExitMonitor;
399static FNVMXEXITHANDLER hmR0VmxExitPause;
400static FNVMXEXITHANDLER hmR0VmxExitErrMachineCheck;
401static FNVMXEXITHANDLER hmR0VmxExitTprBelowThreshold;
402static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
403static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
405static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
406static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
407static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
408static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
409static FNVMXEXITHANDLER hmR0VmxExitWbinvd;
410static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
411static FNVMXEXITHANDLER hmR0VmxExitRdrand;
412static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
413/** @} */
414
415static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
416static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
417static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
418static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
419static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
420static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
421#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
422static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
423#endif
424static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
425
426/*******************************************************************************
427* Global Variables *
428*******************************************************************************/
429#ifdef HMVMX_USE_FUNCTION_TABLE
430
431/**
432 * VMX_EXIT dispatch table.
433 */
434static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
435{
436 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
437 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
438 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
439 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
440 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
441 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
442 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
443 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
444 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
445 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
446 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
447 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
448 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
449 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
450 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
451 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
452 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
453 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
454 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
455 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
456 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
457 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
458 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
459 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
460 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
461 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
462 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
463 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
464 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
465 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
466 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
467 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
468 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
469 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
470 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
471 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
472 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
473 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
474 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
475 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
476 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
477 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
478 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
479 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
480 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
481 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
482 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
483 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
484 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
485 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
486 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
487 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
488 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
489 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
490 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
491 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
492 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
493 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
494 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
495 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
496 /* 60 VMX_EXIT_RESERVED_60 */ hmR0VmxExitErrUndefined,
497 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
498 /* 62 VMX_EXIT_RESERVED_62 */ hmR0VmxExitErrUndefined,
499 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
500 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
501};
502#endif /* HMVMX_USE_FUNCTION_TABLE */
503
504#ifdef VBOX_STRICT
505static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
506{
507 /* 0 */ "(Not Used)",
508 /* 1 */ "VMCALL executed in VMX root operation.",
509 /* 2 */ "VMCLEAR with invalid physical address.",
510 /* 3 */ "VMCLEAR with VMXON pointer.",
511 /* 4 */ "VMLAUNCH with non-clear VMCS.",
512 /* 5 */ "VMRESUME with non-launched VMCS.",
513 /* 6 */ "VMRESUME after VMXOFF.",
514 /* 7 */ "VM-entry with invalid control fields.",
515 /* 8 */ "VM-entry with invalid host state fields.",
516 /* 9 */ "VMPTRLD with invalid physical address.",
517 /* 10 */ "VMPTRLD with VMXON pointer.",
518 /* 11 */ "VMPTRLD with incorrect revision identifier.",
519 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
520 /* 13 */ "VMWRITE to read-only VMCS component.",
521 /* 14 */ "(Not Used)",
522 /* 15 */ "VMXON executed in VMX root operation.",
523 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
524 /* 17 */ "VM-entry with non-launched executive VMCS.",
525 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
526 /* 19 */ "VMCALL with non-clear VMCS.",
527 /* 20 */ "VMCALL with invalid VM-exit control fields.",
528 /* 21 */ "(Not Used)",
529 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
530 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
531 /* 24 */ "VMCALL with invalid SMM-monitor features.",
532 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
533 /* 26 */ "VM-entry with events blocked by MOV SS.",
534 /* 27 */ "(Not Used)",
535 /* 28 */ "Invalid operand to INVEPT/INVVPID."
536};
537#endif /* VBOX_STRICT */
538
539
540
541/**
542 * Updates the VM's last error record. If there was a VMX instruction error,
543 * reads the error data from the VMCS and updates VCPU's last error record as
544 * well.
545 *
546 * @param pVM Pointer to the VM.
547 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
548 * VERR_VMX_UNABLE_TO_START_VM or
549 * VERR_VMX_INVALID_VMCS_FIELD).
550 * @param rc The error code.
551 */
552static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
553{
554 AssertPtr(pVM);
555 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
556 || rc == VERR_VMX_UNABLE_TO_START_VM)
557 {
558 AssertPtrReturnVoid(pVCpu);
559 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
560 }
561 pVM->hm.s.lLastError = rc;
562}
563
564
565/**
566 * Reads the VM-entry interruption-information field from the VMCS into the VMX
567 * transient structure.
568 *
569 * @returns VBox status code.
570 * @param pVmxTransient Pointer to the VMX transient structure.
571 *
572 * @remarks No-long-jump zone!!!
573 */
574DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
575{
576 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
577 AssertRCReturn(rc, rc);
578 return VINF_SUCCESS;
579}
580
581
582/**
583 * Reads the VM-entry exception error code field from the VMCS into
584 * the VMX transient structure.
585 *
586 * @returns VBox status code.
587 * @param pVmxTransient Pointer to the VMX transient structure.
588 *
589 * @remarks No-long-jump zone!!!
590 */
591DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
592{
593 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
594 AssertRCReturn(rc, rc);
595 return VINF_SUCCESS;
596}
597
598
599/**
600 * Reads the VM-entry instruction length field from the VMCS into
601 * the VMX transient structure.
602 *
603 * @returns VBox status code.
604 * @param pVmxTransient Pointer to the VMX transient structure.
605 *
606 * @remarks No-long-jump zone!!!
607 */
608DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
609{
610 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
611 AssertRCReturn(rc, rc);
612 return VINF_SUCCESS;
613}
614
615
616/**
617 * Reads the VM-exit interruption-information field from the VMCS into the VMX
618 * transient structure.
619 *
620 * @returns VBox status code.
621 * @param pVmxTransient Pointer to the VMX transient structure.
622 */
623DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
624{
625 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
626 {
627 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
628 AssertRCReturn(rc, rc);
629 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
630 }
631 return VINF_SUCCESS;
632}
633
634
635/**
636 * Reads the VM-exit interruption error code from the VMCS into the VMX
637 * transient structure.
638 *
639 * @returns VBox status code.
640 * @param pVmxTransient Pointer to the VMX transient structure.
641 */
642DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
643{
644 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
645 {
646 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
647 AssertRCReturn(rc, rc);
648 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
649 }
650 return VINF_SUCCESS;
651}
652
653
654/**
655 * Reads the VM-exit instruction length field from the VMCS into the VMX
656 * transient structure.
657 *
658 * @returns VBox status code.
660 * @param pVmxTransient Pointer to the VMX transient structure.
661 */
662DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
663{
664 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
665 {
666 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
667 AssertRCReturn(rc, rc);
668 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
669 }
670 return VINF_SUCCESS;
671}
672
673
674/**
675 * Reads the VM-exit instruction-information field from the VMCS into
676 * the VMX transient structure.
677 *
678 * @returns VBox status code.
679 * @param pVmxTransient Pointer to the VMX transient structure.
680 */
681DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
682{
683 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
684 {
685 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
686 AssertRCReturn(rc, rc);
687 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
688 }
689 return VINF_SUCCESS;
690}
691
692
693/**
694 * Reads the exit qualification from the VMCS into the VMX transient
695 * structure.
696 *
697 * @returns VBox status code.
698 * @param pVCpu Pointer to the VMCPU (required for the VMCS cache
699 * case).
700 * @param pVmxTransient Pointer to the VMX transient structure.
701 */
702DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
703{
704 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
705 {
706 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
707 AssertRCReturn(rc, rc);
708 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
709 }
710 return VINF_SUCCESS;
711}
712
713
714/**
715 * Reads the IDT-vectoring information field from the VMCS into the VMX
716 * transient structure.
717 *
718 * @returns VBox status code.
719 * @param pVmxTransient Pointer to the VMX transient structure.
720 *
721 * @remarks No-long-jump zone!!!
722 */
723DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
724{
725 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
726 {
727 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
728 AssertRCReturn(rc, rc);
729 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
730 }
731 return VINF_SUCCESS;
732}
733
734
735/**
736 * Reads the IDT-vectoring error code from the VMCS into the VMX
737 * transient structure.
738 *
739 * @returns VBox status code.
740 * @param pVmxTransient Pointer to the VMX transient structure.
741 */
742DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
743{
744 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
745 {
746 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
747 AssertRCReturn(rc, rc);
748 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
749 }
750 return VINF_SUCCESS;
751}
752
753
754/**
755 * Enters VMX root mode operation on the current CPU.
756 *
757 * @returns VBox status code.
758 * @param pVM Pointer to the VM (optional, can be NULL after
759 * a resume).
760 * @param HCPhysCpuPage Physical address of the VMXON region.
761 * @param pvCpuPage Pointer to the VMXON region.
762 */
763static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
764{
765 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
766 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
767 Assert(pvCpuPage);
768 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
769
770 if (pVM)
771 {
772 /* Write the VMCS revision dword to the VMXON region. */
773 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
774 }
775
776 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
777 RTCCUINTREG fEFlags = ASMIntDisableFlags();
778
779 /* Enable the VMX bit in CR4 if necessary. */
780 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
781
782 /* Enter VMX root mode. */
783 int rc = VMXEnable(HCPhysCpuPage);
784 if (RT_FAILURE(rc))
785 {
786 if (!(uOldCr4 & X86_CR4_VMXE))
787 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
788
789 if (pVM)
790 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
791 }
792
793 /* Restore interrupts. */
794 ASMSetFlags(fEFlags);
795 return rc;
796}
797
798
799/**
800 * Exits VMX root mode operation on the current CPU.
801 *
802 * @returns VBox status code.
803 */
804static int hmR0VmxLeaveRootMode(void)
805{
806 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
807
808 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
809 RTCCUINTREG fEFlags = ASMIntDisableFlags();
810
811 /* If we're for some reason not in VMX root mode, then don't leave it. */
812 RTCCUINTREG uHostCR4 = ASMGetCR4();
813
814 int rc;
815 if (uHostCR4 & X86_CR4_VMXE)
816 {
817 /* Exit VMX root mode and clear the VMX bit in CR4. */
818 VMXDisable();
819 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
820 rc = VINF_SUCCESS;
821 }
822 else
823 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
824
825 /* Restore interrupts. */
826 ASMSetFlags(fEFlags);
827 return rc;
828}
829
830
831/**
832 * Allocates and maps one physically contiguous page. The allocated page is
833 * zeroed out. (Used by various VT-x structures.)
834 *
835 * @returns IPRT status code.
836 * @param pMemObj Pointer to the ring-0 memory object.
837 * @param ppVirt Where to store the virtual address of the
838 * allocation.
839 * @param pHCPhys Where to store the physical address of the
840 * allocation.
841 */
842DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
843{
844 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
845 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
846 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
847
848 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
849 if (RT_FAILURE(rc))
850 return rc;
851 *ppVirt = RTR0MemObjAddress(*pMemObj);
852 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
853 ASMMemZero32(*ppVirt, PAGE_SIZE);
854 return VINF_SUCCESS;
855}
856
857
858/**
859 * Frees and unmaps an allocated physical page.
860 *
861 * @param pMemObj Pointer to the ring-0 memory object.
862 * @param ppVirt Where to re-initialize the virtual address of the
863 * allocation as 0.
864 * @param pHCPhys Where to re-initialize the physical address of the
865 * allocation as 0.
866 */
867DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
868{
869 AssertPtr(pMemObj);
870 AssertPtr(ppVirt);
871 AssertPtr(pHCPhys);
872 if (*pMemObj != NIL_RTR0MEMOBJ)
873 {
874 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
875 AssertRC(rc);
876 *pMemObj = NIL_RTR0MEMOBJ;
877 *ppVirt = 0;
878 *pHCPhys = 0;
879 }
880}
881
882
883/**
884 * Worker function to free VT-x related structures.
885 *
887 * @param pVM Pointer to the VM.
888 */
889static void hmR0VmxStructsFree(PVM pVM)
890{
891 for (VMCPUID i = 0; i < pVM->cCpus; i++)
892 {
893 PVMCPU pVCpu = &pVM->aCpus[i];
894 AssertPtr(pVCpu);
895
896 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
897 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
898
899 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
900 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
901
902 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
903 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
904 }
905
906 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
907#ifdef VBOX_WITH_CRASHDUMP_MAGIC
908 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
909#endif
910}
911
912
913/**
914 * Worker function to allocate VT-x related VM structures.
915 *
916 * @returns IPRT status code.
917 * @param pVM Pointer to the VM.
918 */
919static int hmR0VmxStructsAlloc(PVM pVM)
920{
921 /*
922 * Initialize members up-front so we can cleanup properly on allocation failure.
923 */
924#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
925 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
926 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
927 pVM->hm.s.vmx.HCPhys##a_Name = 0;
928
929#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
930 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
931 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
932 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
933
934#ifdef VBOX_WITH_CRASHDUMP_MAGIC
935 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
936#endif
937 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
938
939 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
940 for (VMCPUID i = 0; i < pVM->cCpus; i++)
941 {
942 PVMCPU pVCpu = &pVM->aCpus[i];
943 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
944 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
945 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
946 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
947 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
948 }
949#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
950#undef VMXLOCAL_INIT_VM_MEMOBJ
951
952 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
953 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
954 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
955 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
956
957 /*
958 * Allocate all the VT-x structures.
959 */
960 int rc = VINF_SUCCESS;
961#ifdef VBOX_WITH_CRASHDUMP_MAGIC
962 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
963 if (RT_FAILURE(rc))
964 goto cleanup;
965 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
966 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
967#endif
968
969 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
970 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
971 {
972 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
973 &pVM->hm.s.vmx.HCPhysApicAccess);
974 if (RT_FAILURE(rc))
975 goto cleanup;
976 }
977
978 /*
979 * Initialize per-VCPU VT-x structures.
980 */
981 for (VMCPUID i = 0; i < pVM->cCpus; i++)
982 {
983 PVMCPU pVCpu = &pVM->aCpus[i];
984 AssertPtr(pVCpu);
985
986 /* Allocate the VM control structure (VMCS). */
987 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
988 if (RT_FAILURE(rc))
989 goto cleanup;
990
991 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
992 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
993 {
994 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
995 &pVCpu->hm.s.vmx.HCPhysVirtApic);
996 if (RT_FAILURE(rc))
997 goto cleanup;
998 }
999
1000 /*
1001 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
1002 * transparent accesses of specific MSRs.
1003 *
1004 * If the condition for enabling MSR bitmaps changes here, don't forget to
1005 * update HMAreMsrBitmapsAvailable().
1006 */
1007 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1008 {
1009 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1010 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1011 if (RT_FAILURE(rc))
1012 goto cleanup;
1013 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1014 }
1015
1016 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1017 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1018 if (RT_FAILURE(rc))
1019 goto cleanup;
1020
1021 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1022 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1023 if (RT_FAILURE(rc))
1024 goto cleanup;
1025 }
1026
1027 return VINF_SUCCESS;
1028
1029cleanup:
1030 hmR0VmxStructsFree(pVM);
1031 return rc;
1032}
1033
1034
1035/**
1036 * Does global VT-x initialization (called during module initialization).
1037 *
1038 * @returns VBox status code.
1039 */
1040VMMR0DECL(int) VMXR0GlobalInit(void)
1041{
1042#ifdef HMVMX_USE_FUNCTION_TABLE
1043 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1044# ifdef VBOX_STRICT
1045 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1046 Assert(g_apfnVMExitHandlers[i]);
1047# endif
1048#endif
1049 return VINF_SUCCESS;
1050}
1051
1052
1053/**
1054 * Does global VT-x termination (called during module termination).
1055 */
1056VMMR0DECL(void) VMXR0GlobalTerm()
1057{
1058 /* Nothing to do currently. */
1059}
1060
1061
1062/**
1063 * Sets up and activates VT-x on the current CPU.
1064 *
1065 * @returns VBox status code.
1066 * @param pCpu Pointer to the global CPU info struct.
1067 * @param pVM Pointer to the VM (can be NULL after a host resume
1068 * operation).
1069 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1070 * fEnabledByHost is true).
1071 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1072 * @a fEnabledByHost is true).
1073 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1074 * enable VT-x on the host.
1075 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1076 */
1077VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1078 void *pvMsrs)
1079{
1080 Assert(pCpu);
1081 Assert(pvMsrs);
1082 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1083
1084 /* Enable VT-x if it's not already enabled by the host. */
1085 if (!fEnabledByHost)
1086 {
1087 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1088 if (RT_FAILURE(rc))
1089 return rc;
1090 }
1091
1092 /*
1093 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
1094 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
1095 */
1096 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1097 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1098 {
1099 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1100 pCpu->fFlushAsidBeforeUse = false;
1101 }
1102 else
1103 pCpu->fFlushAsidBeforeUse = true;
1104
1105 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1106 ++pCpu->cTlbFlushes;
1107
1108 return VINF_SUCCESS;
1109}
1110
1111
1112/**
1113 * Deactivates VT-x on the current CPU.
1114 *
1115 * @returns VBox status code.
1116 * @param pCpu Pointer to the global CPU info struct.
1117 * @param pvCpuPage Pointer to the VMXON region.
1118 * @param HCPhysCpuPage Physical address of the VMXON region.
1119 *
1120 * @remarks This function should never be called when SUPR0EnableVTx() or
1121 * similar was used to enable VT-x on the host.
1122 */
1123VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1124{
1125 NOREF(pCpu);
1126 NOREF(pvCpuPage);
1127 NOREF(HCPhysCpuPage);
1128
1129 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1130 return hmR0VmxLeaveRootMode();
1131}
1132
1133
1134/**
1135 * Sets the permission bits for the specified MSR in the MSR bitmap.
1136 *
1137 * @param pVCpu Pointer to the VMCPU.
1138 * @param uMsr The MSR value.
1139 * @param enmRead Whether reading this MSR causes a VM-exit.
1140 * @param enmWrite Whether writing this MSR causes a VM-exit.
1141 */
1142static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1143{
1144 int32_t iBit;
1145 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1146
1147 /*
1148 * Layout:
1149 * 0x000 - 0x3ff - Low MSR read bits
1150 * 0x400 - 0x7ff - High MSR read bits
1151 * 0x800 - 0xbff - Low MSR write bits
1152 * 0xc00 - 0xfff - High MSR write bits
1153 */
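/* Example: for a high-range MSR such as 0xC0000082 (LSTAR), iBit becomes 0x82 and
   pbMsrBitmap is advanced by 0x400, placing its read bit in the "high MSR read bits"
   region; the corresponding write bit lives at the same index plus 0x800. */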
1154 if (uMsr <= 0x00001FFF)
1155 iBit = uMsr;
1156 else if ( uMsr >= 0xC0000000
1157 && uMsr <= 0xC0001FFF)
1158 {
1159 iBit = (uMsr - 0xC0000000);
1160 pbMsrBitmap += 0x400;
1161 }
1162 else
1163 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1164
1165 Assert(iBit <= 0x1fff);
1166 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1167 ASMBitSet(pbMsrBitmap, iBit);
1168 else
1169 ASMBitClear(pbMsrBitmap, iBit);
1170
1171 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1172 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1173 else
1174 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1175}
1176
1177
1178#ifdef VBOX_STRICT
1179/**
1180 * Gets the permission bits for the specified MSR in the MSR bitmap.
1181 *
1182 * @returns VBox status code.
1183 * @retval VINF_SUCCESS if the specified MSR is found.
1184 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1185 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1186 *
1187 * @param pVCpu Pointer to the VMCPU.
1188 * @param uMsr The MSR.
1189 * @param penmRead Where to store the read permissions.
1190 * @param penmWrite Where to store the write permissions.
1191 */
1192static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1193{
1194 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1195 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1196 int32_t iBit;
1197 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1198
1199 /* See hmR0VmxSetMsrPermission() for the layout. */
1200 if (uMsr <= 0x00001FFF)
1201 iBit = uMsr;
1202 else if ( uMsr >= 0xC0000000
1203 && uMsr <= 0xC0001FFF)
1204 {
1205 iBit = (uMsr - 0xC0000000);
1206 pbMsrBitmap += 0x400;
1207 }
1208 else
1209 AssertMsgFailedReturn(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr), VERR_NOT_SUPPORTED);
1210
1211 Assert(iBit <= 0x1fff);
1212 if (ASMBitTest(pbMsrBitmap, iBit))
1213 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1214 else
1215 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1216
1217 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1218 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1219 else
1220 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1221 return VINF_SUCCESS;
1222}
1223#endif /* VBOX_STRICT */
1224
1225
1226/**
1227 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1228 * area.
1229 *
1230 * @returns VBox status code.
1231 * @param pVCpu Pointer to the VMCPU.
1232 * @param cMsrs The number of MSRs.
1233 */
1234DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1235{
1236 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1237 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1238 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1239 {
1240 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1241 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1242 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1243 }
1244
1245 /* Update number of guest MSRs to load/store across the world-switch. */
1246 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1247 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRCReturn(rc, rc);
1248
1249 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1250 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRCReturn(rc, rc);
1251
1252 /* Update the VCPU's copy of the MSR count. */
1253 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1254
1255 return VINF_SUCCESS;
1256}
1257
1258
1259/**
1260 * Adds a new (or updates the value of an existing) guest/host MSR
1261 * pair to be swapped during the world-switch as part of the
1262 * auto-load/store MSR area in the VMCS.
1263 *
1264 * @returns VBox status code.
1265 * @param pVCpu Pointer to the VMCPU.
1266 * @param uMsr The MSR.
1267 * @param uGuestMsrValue Value of the guest MSR.
1268 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1269 * necessary.
1270 * @param pfAddedAndUpdated Where to store whether the MSR was added -and-
1271 * its value was updated. Optional, can be NULL.
1272 */
1273static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
1274 bool *pfAddedAndUpdated)
1275{
1276 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1277 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1278 uint32_t i;
1279 for (i = 0; i < cMsrs; i++)
1280 {
1281 if (pGuestMsr->u32Msr == uMsr)
1282 break;
1283 pGuestMsr++;
1284 }
1285
1286 bool fAdded = false;
1287 if (i == cMsrs)
1288 {
1289 ++cMsrs;
1290 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1291 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
1292
1293 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1294 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1295 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1296
1297 fAdded = true;
1298 }
1299
1300 /* Update the MSR values in the auto-load/store MSR area. */
1301 pGuestMsr->u32Msr = uMsr;
1302 pGuestMsr->u64Value = uGuestMsrValue;
1303
1304 /* Create/update the MSR slot in the host MSR area. */
1305 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1306 pHostMsr += i;
1307 pHostMsr->u32Msr = uMsr;
1308
1309 /*
1310 * Update the host MSR only when requested by the caller AND when we're
1311 * adding it to the auto-load/store area. Otherwise, it would have been
1312 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
1313 */
1314 bool fUpdatedMsrValue = false;
1315 if ( fAdded
1316 && fUpdateHostMsr)
1317 {
1318 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1319 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1320 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1321 fUpdatedMsrValue = true;
1322 }
1323
1324 if (pfAddedAndUpdated)
1325 *pfAddedAndUpdated = fUpdatedMsrValue;
1326 return VINF_SUCCESS;
1327}
1328
1329
1330/**
1331 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1332 * auto-load/store MSR area in the VMCS.
1333 *
1334 * @returns VBox status code.
1335 * @param pVCpu Pointer to the VMCPU.
1336 * @param uMsr The MSR.
1337 */
1338static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1339{
1340 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1341 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1342 for (uint32_t i = 0; i < cMsrs; i++)
1343 {
1344 /* Find the MSR. */
1345 if (pGuestMsr->u32Msr == uMsr)
1346 {
1347 /* If it's the last MSR, simply reduce the count. */
1348 if (i == cMsrs - 1)
1349 {
1350 --cMsrs;
1351 break;
1352 }
1353
1354 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1355 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1356 pLastGuestMsr += cMsrs - 1;
1357 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1358 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1359
1360 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1361 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1362 pLastHostMsr += cMsrs - 1;
1363 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1364 pHostMsr->u64Value = pLastHostMsr->u64Value;
1365 --cMsrs;
1366 break;
1367 }
1368 pGuestMsr++;
1369 }
1370
1371 /* Update the VMCS if the count changed (meaning the MSR was found). */
1372 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1373 {
1374 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1375 AssertRCReturn(rc, rc);
1376
1377 /* We're no longer swapping MSRs during the world-switch; intercept guest reads/writes to them. */
1378 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1379 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1380
1381 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1382 return VINF_SUCCESS;
1383 }
1384
1385 return VERR_NOT_FOUND;
1386}
1387
1388
1389/**
1390 * Checks if the specified guest MSR is part of the auto-load/store area in
1391 * the VMCS.
1392 *
1393 * @returns true if found, false otherwise.
1394 * @param pVCpu Pointer to the VMCPU.
1395 * @param uMsr The MSR to find.
1396 */
1397static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1398{
1399 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1400 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1401
1402 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1403 {
1404 if (pGuestMsr->u32Msr == uMsr)
1405 return true;
1406 }
1407 return false;
1408}
1409
1410
1411/**
1412 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1413 *
1414 * @param pVCpu Pointer to the VMCPU.
1415 *
1416 * @remarks No-long-jump zone!!!
1417 */
1418static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1419{
1420 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1421 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1422 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1423 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1424
1425 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1426 {
1427 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1428
1429 /*
1430 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1431 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1432 */
1433 if (pHostMsr->u32Msr == MSR_K6_EFER)
1434 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1435 else
1436 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1437 }
1438
1439 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1440}
1441
1442
1443#if HC_ARCH_BITS == 64
1444/**
1445 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1446 * perform lazy restoration of the host MSRs while leaving VT-x.
1447 *
1448 * @param pVCpu Pointer to the VMCPU.
1449 *
1450 * @remarks No-long-jump zone!!!
1451 */
1452static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1453{
1454 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1455
1456 /*
1457 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1458 */
1459 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1460 {
1461 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1462 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1463 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1464 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1465 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1466 }
1467}
1468
1469
1470/**
1471 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1472 * lazily while leaving VT-x.
1473 *
1474 * @returns true if it does, false otherwise.
1475 * @param pVCpu Pointer to the VMCPU.
1476 * @param uMsr The MSR to check.
1477 */
1478static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1479{
1480 NOREF(pVCpu);
1481 switch (uMsr)
1482 {
1483 case MSR_K8_LSTAR:
1484 case MSR_K6_STAR:
1485 case MSR_K8_SF_MASK:
1486 case MSR_K8_KERNEL_GS_BASE:
1487 return true;
1488 }
1489 return false;
1490}
1491
1492
1493/**
1494 * Saves a set of guest MSRs back into the guest-CPU context.
1495 *
1496 * @param pVCpu Pointer to the VMCPU.
1497 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1498 * out-of-sync. Make sure to update the required fields
1499 * before using them.
1500 *
1501 * @remarks No-long-jump zone!!!
1502 */
1503static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1504{
1505 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1506 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1507
1508 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1509 {
1510 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1511 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
1512 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
1513 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
1514 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1515 }
1516}
1517
1518
1519/**
1520 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1521 *
1522 * The name of this function is slightly confusing. This function does NOT
1523 * postpone loading, but loads the MSRs right now. "hmR0VmxLazy" is simply a
1524 * common prefix for functions dealing with "lazy restoration" of the shared
1525 * MSRs.
1526 *
1527 * @param pVCpu Pointer to the VMCPU.
1528 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1529 * out-of-sync. Make sure to update the required fields
1530 * before using them.
1531 *
1532 * @remarks No-long-jump zone!!!
1533 */
1534static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1535{
1536 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1537 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1538
1539#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
1540 do { \
1541 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1542 ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
1543 else \
1544 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1545 } while (0)
1546
1547 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1548 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1549 {
1550 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
1551 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
1552 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
1553 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
1554 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1555 }
1556 else
1557 {
1558 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1559 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1560 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1561 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1562 }
1563
1564#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
1565}
1566
1567
1568/**
1569 * Performs lazy restoration of the set of host MSRs if they were previously
1570 * loaded with guest MSR values.
1571 *
1572 * @param pVCpu Pointer to the VMCPU.
1573 *
1574 * @remarks No-long-jump zone!!!
1575 * @remarks The guest MSRs should have been saved back into the guest-CPU
1576 * context by hmR0VmxSaveGuestLazyMsrs()!!!
1577 */
1578static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1579{
1580 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1581 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1582
1583 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1584 {
1585 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1586 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1587 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1588 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1589 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1590 }
1591 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1592}
1593#endif /* HC_ARCH_BITS == 64 */
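
/*
 * Illustrative sketch (not part of the build): one plausible calling order of the lazy MSR helpers
 * above around a guest run on a 64-bit host, consistent with the assertions and remarks in those
 * functions. This is an editor's sketch for clarity only; the helper name is made up and the real
 * call sites live elsewhere in this file and in the generic HM code.
 */
#if 0
static void hmR0VmxSketchLazyMsrLifecycle(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    hmR0VmxLazySaveHostMsrs(pVCpu);               /* 1. Stash the host LSTAR/STAR/SF_MASK/KERNEL_GS_BASE values. */
    hmR0VmxLazyLoadGuestMsrs(pVCpu, pMixedCtx);   /* 2. Load the guest values, skipping writes that match the host. */
    /* ... run guest code ... */
    hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);   /* 3. Read the (possibly modified) guest values back into pMixedCtx. */
    hmR0VmxLazyRestoreHostMsrs(pVCpu);            /* 4. Put the host values back before leaving VT-x. */
}
#endif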
1594
1595
1596/**
1597 * Verifies that our cached values of the VMCS controls are all
1598 * consistent with what's actually present in the VMCS.
1599 *
1600 * @returns VBox status code.
1601 * @param pVCpu Pointer to the VMCPU.
1602 */
1603static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1604{
1605 uint32_t u32Val;
1606 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1607 AssertRCReturn(rc, rc);
1608 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1609 VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
1610
1611 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1612 AssertRCReturn(rc, rc);
1613 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1614 VERR_VMX_EXIT_CTLS_CACHE_INVALID);
1615
1616 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1617 AssertRCReturn(rc, rc);
1618 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1619 VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
1620
1621 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1622 AssertRCReturn(rc, rc);
1623 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1624 VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
1625
1626 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1627 {
1628 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1629 AssertRCReturn(rc, rc);
1630 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1631 VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
1632 }
1633
1634 return VINF_SUCCESS;
1635}
1636
1637
1638#ifdef VBOX_STRICT
1639/**
1640 * Verifies that our cached host EFER value has not changed
1641 * since we cached it.
1642 *
1643 * @param pVCpu Pointer to the VMCPU.
1644 */
1645static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1646{
1647 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1648
1649 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1650 {
1651 uint64_t u64Val;
1652 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
1653 AssertRC(rc);
1654
1655 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1656 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1657 }
1658}
1659
1660
1661/**
1662 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1663 * VMCS are correct.
1664 *
1665 * @param pVCpu Pointer to the VMCPU.
1666 */
1667static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1668{
1669 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1670
1671 /* Verify the MSR counts in the VMCS are what we think they should be. */
1672 uint32_t cMsrs;
1673 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1674 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1675
1676 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1677 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1678
1679 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1680 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1681
1682 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1683 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1684 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1685 {
1686 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1687 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1688 pGuestMsr->u32Msr, cMsrs));
1689
1690 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1691 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1692 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1693
1694 /* Verify that the permissions are as expected in the MSR bitmap. */
1695 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1696 {
1697 VMXMSREXITREAD enmRead;
1698 VMXMSREXITWRITE enmWrite;
1699 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1700 AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1701 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1702 {
1703 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1704 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1705 }
1706 else
1707 {
1708 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1709 pGuestMsr->u32Msr, cMsrs));
1710 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1711 pGuestMsr->u32Msr, cMsrs));
1712 }
1713 }
1714 }
1715}
1716#endif /* VBOX_STRICT */
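
/*
 * Illustrative sketch (not compiled): the invariant hmR0VmxCheckAutoLoadStoreMsrs() verifies is that
 * entry i of the guest auto-load/store area and entry i of the host area describe the same MSR. The
 * hypothetical helper below only shows how such a pair would be filled in by hand using the
 * VMXAUTOMSR fields referenced above (u32Msr, u64Value); it is not how this file actually adds MSRs.
 */
#if 0
static void hmR0VmxSketchWriteMsrPair(PVMCPU pVCpu, uint32_t iEntry, uint32_t uMsr, uint64_t u64GuestValue)
{
    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr + iEntry;
    PVMXAUTOMSR pHostMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr  + iEntry;

    pGuestMsr->u32Msr   = uMsr;                             /* Loaded on VM-entry, stored on VM-exit. */
    pGuestMsr->u64Value = u64GuestValue;

    pHostMsr->u32Msr    = uMsr;                             /* Must pair up with the guest entry at the same index. */
    pHostMsr->u64Value  = uMsr == MSR_K6_EFER               /* EFER uses the cached host value (see above), */
                        ? pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer
                        : ASMRdMsr(uMsr);                   /* everything else is read straight from the CPU. */
}
#endif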
1717
1718
1719/**
1720 * Flushes the TLB using EPT.
1721 *
1723 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1724 * enmFlush).
1725 * @param enmFlush Type of flush.
1726 *
1727 * @remarks Caller is responsible for making sure this function is called only
1728 * when NestedPaging is supported and providing @a enmFlush that is
1729 * supported by the CPU.
1730 * @remarks Can be called with interrupts disabled.
1731 */
1732static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1733{
1734 uint64_t au64Descriptor[2];
1735 if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
1736 au64Descriptor[0] = 0;
1737 else
1738 {
1739 Assert(pVCpu);
1740 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1741 }
1742 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1743
1744 int rc = VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
1745 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1746 rc));
1747 if ( RT_SUCCESS(rc)
1748 && pVCpu)
1749 {
1750 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1751 }
1752}
1753
1754
1755/**
1756 * Flushes the TLB using VPID.
1757 *
1759 * @param pVM Pointer to the VM.
1760 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1761 * enmFlush).
1762 * @param enmFlush Type of flush.
1763 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1764 * on @a enmFlush).
1765 *
1766 * @remarks Can be called with interrupts disabled.
1767 */
1768static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1769{
1770 NOREF(pVM);
1771 AssertPtr(pVM);
1772 Assert(pVM->hm.s.vmx.fVpid);
1773
1774 uint64_t au64Descriptor[2];
1775 if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
1776 {
1777 au64Descriptor[0] = 0;
1778 au64Descriptor[1] = 0;
1779 }
1780 else
1781 {
1782 AssertPtr(pVCpu);
1783 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1784 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1785 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1786 au64Descriptor[1] = GCPtr;
1787 }
1788
1789 int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
1790 AssertMsg(rc == VINF_SUCCESS,
1791 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1792 if ( RT_SUCCESS(rc)
1793 && pVCpu)
1794 {
1795 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1796 }
1797}
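
/*
 * Illustrative sketch (not compiled): the two ways hmR0VmxFlushVpid() is typically invoked, mirroring
 * the descriptor layouts built above. Purely an editor's example; the helper name is made up and the
 * real call sites appear further down in this file.
 */
#if 0
static void hmR0VmxSketchVpidFlushUsage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Flush a single guest linear address; only valid when the CPU advertises
       MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR. Descriptor = { current ASID, GCVirt }. */
    hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);

    /* Flush all VPID-tagged mappings across all contexts. Descriptor = { 0, 0 }; per the remarks
       above, pVCpu is not required (may be NULL) for this flush type. */
    hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
}
#endif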
1798
1799
1800/**
1801 * Invalidates a guest page by guest virtual address. Only relevant for
1802 * EPT/VPID, otherwise there is nothing really to invalidate.
1803 *
1804 * @returns VBox status code.
1805 * @param pVM Pointer to the VM.
1806 * @param pVCpu Pointer to the VMCPU.
1807 * @param GCVirt Guest virtual address of the page to invalidate.
1808 */
1809VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1810{
1811 AssertPtr(pVM);
1812 AssertPtr(pVCpu);
1813 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1814
1815 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1816 if (!fFlushPending)
1817 {
1818 /*
1819 * We must invalidate the guest TLB entry in either case; we cannot ignore it even in the EPT case.
1820 * See @bugref{6043} and @bugref{6177}.
1821 *
1822 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
1823 * function may be called in a loop with individual addresses.
1824 */
1825 if (pVM->hm.s.vmx.fVpid)
1826 {
1827 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1828 {
1829 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1830 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1831 }
1832 else
1833 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1834 }
1835 else if (pVM->hm.s.fNestedPaging)
1836 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1837 }
1838
1839 return VINF_SUCCESS;
1840}
1841
1842
1843/**
1844 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1845 * otherwise there is nothing really to invalidate.
1846 *
1847 * @returns VBox status code.
1848 * @param pVM Pointer to the VM.
1849 * @param pVCpu Pointer to the VMCPU.
1850 * @param GCPhys Guest physical address of the page to invalidate.
1851 */
1852VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1853{
1854 NOREF(pVM); NOREF(GCPhys);
1855 LogFlowFunc(("%RGp\n", GCPhys));
1856
1857 /*
1858 * We cannot flush a page by guest-physical address: INVVPID takes only a linear address, while INVEPT flushes
1859 * entire EPT contexts rather than individual addresses. We update the force flag here and flush before the next
1860 * VM-entry in hmR0VmxFlushTLB*() as this function might be called in a loop; with EPT in use that is a flush-by-EPT. See @bugref{6568}.
1861 */
1862 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1863 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1864 return VINF_SUCCESS;
1865}
1866
1867
1868/**
1869 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1870 * case where neither EPT nor VPID is supported by the CPU.
1871 *
1872 * @param pVM Pointer to the VM.
1873 * @param pVCpu Pointer to the VMCPU.
1874 * @param pCpu Pointer to the global HM struct.
1875 *
1876 * @remarks Called with interrupts disabled.
1877 */
1878static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1879{
1880 AssertPtr(pVCpu);
1881 AssertPtr(pCpu);
1882 NOREF(pVM);
1883
1884 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1885
1886 /** @todo TLB shootdown is currently not used. See hmQueueInvlPage(). */
1887#if 0
1888 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1889 pVCpu->hm.s.TlbShootdown.cPages = 0;
1890#endif
1891
1892 Assert(pCpu->idCpu != NIL_RTCPUID);
1893 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1894 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1895 pVCpu->hm.s.fForceTLBFlush = false;
1896 return;
1897}
1898
1899
1900/**
1901 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1902 *
1903 * @param pVM Pointer to the VM.
1904 * @param pVCpu Pointer to the VMCPU.
1905 * @param pCpu Pointer to the global HM CPU struct.
1906 * @remarks All references to "ASID" in this function pertain to "VPID" in
1907 * Intel's nomenclature. The reason is to avoid confusion in comparisons,
1908 * since the host-CPU copies are named "ASID".
1909 *
1910 * @remarks Called with interrupts disabled.
1911 */
1912static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1913{
1914#ifdef VBOX_WITH_STATISTICS
1915 bool fTlbFlushed = false;
1916# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1917# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1918 if (!fTlbFlushed) \
1919 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1920 } while (0)
1921#else
1922# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1923# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1924#endif
1925
1926 AssertPtr(pVM);
1927 AssertPtr(pCpu);
1928 AssertPtr(pVCpu);
1929 Assert(pCpu->idCpu != NIL_RTCPUID);
1930
1931 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1932 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1933 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1934
1935 /*
1936 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1937 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1938 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1939 */
1940 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1941 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1942 {
1943 ++pCpu->uCurrentAsid;
1944 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1945 {
1946 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1947 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1948 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1949 }
1950
1951 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1952 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1953 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1954
1955 /*
1956 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1957 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1958 */
1959 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1960 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1961 HMVMX_SET_TAGGED_TLB_FLUSHED();
1962 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1963 }
1964
1965 /* Check for explicit TLB shootdowns. */
1966 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1967 {
1968 /*
1969 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates
1970 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1971 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1972 * but not guest-physical mappings.
1973 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1974 */
1975 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1976 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1977 HMVMX_SET_TAGGED_TLB_FLUSHED();
1978 }
1979
1980 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
1981 * where it is commented out. Support individual entry flushing
1982 * someday. */
1983#if 0
1984 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1985 {
1986 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1987
1988 /*
1989 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1990 * as supported by the CPU.
1991 */
1992 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1993 {
1994 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1995 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1996 }
1997 else
1998 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1999
2000 HMVMX_SET_TAGGED_TLB_FLUSHED();
2001 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2002 pVCpu->hm.s.TlbShootdown.cPages = 0;
2003 }
2004#endif
2005
2006 pVCpu->hm.s.fForceTLBFlush = false;
2007
2008 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
2009
2010 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
2011 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
2012 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2013 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2014 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2015 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2016 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2017 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2018 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2019
2020 /* Update VMCS with the VPID. */
2021 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2022 AssertRC(rc);
2023
2024#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2025}
2026
2027
2028/**
2029 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2030 *
2032 * @param pVM Pointer to the VM.
2033 * @param pVCpu Pointer to the VMCPU.
2034 * @param pCpu Pointer to the global HM CPU struct.
2035 *
2036 * @remarks Called with interrupts disabled.
2037 */
2038static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2039{
2040 AssertPtr(pVM);
2041 AssertPtr(pVCpu);
2042 AssertPtr(pCpu);
2043 Assert(pCpu->idCpu != NIL_RTCPUID);
2044 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
2045 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
2046
2047 /*
2048 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2049 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2050 */
2051 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2052 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2053 {
2054 pVCpu->hm.s.fForceTLBFlush = true;
2055 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2056 }
2057
2058 /* Check for explicit TLB shootdown flushes. */
2059 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2060 {
2061 pVCpu->hm.s.fForceTLBFlush = true;
2062 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2063 }
2064
2065 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2066 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2067
2068 if (pVCpu->hm.s.fForceTLBFlush)
2069 {
2070 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2071 pVCpu->hm.s.fForceTLBFlush = false;
2072 }
2073 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2074 * where it is commented out. Support individual entry flushing
2075 * someday. */
2076#if 0
2077 else
2078 {
2079 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2080 {
2081 /* We cannot flush individual entries without VPID support. Flush using EPT. */
2082 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2083 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2084 }
2085 else
2086 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2087
2088 pVCpu->hm.s.TlbShootdown.cPages = 0;
2089 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2090 }
2091#endif
2092}
2093
2094
2095/**
2096 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2097 *
2099 * @param pVM Pointer to the VM.
2100 * @param pVCpu Pointer to the VMCPU.
2101 * @param pCpu Pointer to the global HM CPU struct.
2102 *
2103 * @remarks Called with interrupts disabled.
2104 */
2105static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2106{
2107 AssertPtr(pVM);
2108 AssertPtr(pVCpu);
2109 AssertPtr(pCpu);
2110 Assert(pCpu->idCpu != NIL_RTCPUID);
2111 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with VPID disabled."));
2112 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging enabled."));
2113
2114 /*
2115 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2116 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2117 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2118 */
2119 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2120 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2121 {
2122 pVCpu->hm.s.fForceTLBFlush = true;
2123 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2124 }
2125
2126 /* Check for explicit TLB shootdown flushes. */
2127 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2128 {
2129 /*
2130 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb()),
2131 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
2132 * pCpu->fFlushAsidBeforeUse check below to also include fExplicitFlush) - an obscure corner case.
2133 */
2134 pVCpu->hm.s.fForceTLBFlush = true;
2135 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2136 }
2137
2138 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2139 if (pVCpu->hm.s.fForceTLBFlush)
2140 {
2141 ++pCpu->uCurrentAsid;
2142 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2143 {
2144 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2145 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2146 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2147 }
2148
2149 pVCpu->hm.s.fForceTLBFlush = false;
2150 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2151 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2152 if (pCpu->fFlushAsidBeforeUse)
2153 {
2154 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
2155 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2156 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
2157 {
2158 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2159 pCpu->fFlushAsidBeforeUse = false;
2160 }
2161 else
2162 {
2163 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2164 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2165 }
2166 }
2167 }
2168 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
2169 * where it is commented out. Support individual entry flushing
2170 * someday. */
2171#if 0
2172 else
2173 {
2174 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2175 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
2176 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2177 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
2178
2179 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2180 {
2181 /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
2182 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2183 {
2184 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2185 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2186 }
2187 else
2188 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2189
2190 pVCpu->hm.s.TlbShootdown.cPages = 0;
2191 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2192 }
2193 else
2194 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2195 }
2196#endif
2197
2198 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2199 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2200 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2201 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2202 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2203 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2204 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2205
2206 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2207 AssertRC(rc);
2208}
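
/*
 * Illustrative sketch (not compiled): the ASID/VPID wrap-around rule used by the tagged-TLB flush
 * paths above, pulled out as a standalone helper for clarity. The limit in the comment is only an
 * example; the real limit comes from pVM->hm.s.uMaxAsid and the helper name is made up.
 */
#if 0
static uint32_t hmR0VmxSketchNextAsid(uint32_t uCurrentAsid, uint32_t uMaxAsid,
                                      uint32_t *pcTlbFlushes, bool *pfFlushAsidBeforeUse)
{
    if (++uCurrentAsid >= uMaxAsid)     /* E.g. with uMaxAsid = 0x10000 (16-bit VPID), ASID 0xffff wraps next. */
    {
        uCurrentAsid = 1;               /* Wraparound to 1; ASID 0 is reserved for the host. */
        ++*pcTlbFlushes;                /* Every VCPU that ran on this host CPU must pick a new VPID... */
        *pfFlushAsidBeforeUse = true;   /* ...and must flush that new VPID before first use. */
    }
    return uCurrentAsid;
}
#endif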
2209
2210
2211/**
2212 * Flushes the guest TLB entry based on CPU capabilities.
2213 *
2214 * @param pVCpu Pointer to the VMCPU.
2215 * @param pCpu Pointer to the global HM CPU struct.
2216 */
2217DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2218{
2219#ifdef HMVMX_ALWAYS_FLUSH_TLB
2220 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2221#endif
2222 PVM pVM = pVCpu->CTX_SUFF(pVM);
2223 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
2224 {
2225 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2226 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2227 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2228 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2229 default:
2230 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2231 break;
2232 }
2233
2234 /* VMCPU_FF_TLB_SHOOTDOWN is unused. */
2235 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN));
2236
2237 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2238}
2239
2240
2241/**
2242 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2243 * TLB entries from the host TLB before VM-entry.
2244 *
2245 * @returns VBox status code.
2246 * @param pVM Pointer to the VM.
2247 */
2248static int hmR0VmxSetupTaggedTlb(PVM pVM)
2249{
2250 /*
2251 * Determine optimal flush type for Nested Paging.
2252 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
2253 * guest execution (see hmR3InitFinalizeR0()).
2254 */
2255 if (pVM->hm.s.fNestedPaging)
2256 {
2257 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2258 {
2259 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2260 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
2261 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2262 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
2263 else
2264 {
2265 /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
2266 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2267 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2268 }
2269
2270 /* Make sure the write-back cacheable memory type for EPT is supported. */
2271 if (!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
2272 {
2273 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.Msrs.u64EptVpidCaps));
2274 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2275 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2276 }
2277 }
2278 else
2279 {
2280 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2281 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2282 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2283 }
2284 }
2285
2286 /*
2287 * Determine optimal flush type for VPID.
2288 */
2289 if (pVM->hm.s.vmx.fVpid)
2290 {
2291 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2292 {
2293 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2294 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
2295 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2296 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
2297 else
2298 {
2299 /* Neither SINGLE- nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2300 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2301 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
2302 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2303 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2304 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2305 pVM->hm.s.vmx.fVpid = false;
2306 }
2307 }
2308 else
2309 {
2310 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2311 Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
2312 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2313 pVM->hm.s.vmx.fVpid = false;
2314 }
2315 }
2316
2317 /*
2318 * Setup the handler for flushing tagged-TLBs.
2319 */
2320 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2321 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
2322 else if (pVM->hm.s.fNestedPaging)
2323 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
2324 else if (pVM->hm.s.vmx.fVpid)
2325 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
2326 else
2327 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
2328 return VINF_SUCCESS;
2329}
2330
2331
2332/**
2333 * Sets up pin-based VM-execution controls in the VMCS.
2334 *
2335 * @returns VBox status code.
2336 * @param pVM Pointer to the VM.
2337 * @param pVCpu Pointer to the VMCPU.
2338 */
2339static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2340{
2341 AssertPtr(pVM);
2342 AssertPtr(pVCpu);
2343
2344 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2345 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2346
2347 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2348 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2349
2350 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2351 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2352
2353 /* Enable the VMX preemption timer. */
2354 if (pVM->hm.s.vmx.fUsePreemptTimer)
2355 {
2356 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2357 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2358 }
2359
2360 if ((val & zap) != val)
2361 {
2362 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2363 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
2364 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2365 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2366 }
2367
2368 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
2369 AssertRCReturn(rc, rc);
2370
2371 pVCpu->hm.s.vmx.u32PinCtls = val;
2372 return rc;
2373}
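
/*
 * Illustrative sketch (not compiled): the disallowed0/allowed1 pattern used by the control setup
 * routines above and below. "val" starts from the must-be-one bits, desired features are OR'ed in,
 * and the (val & zap) != val test rejects any feature the CPU's capability MSR does not allow.
 * The helper and its parameter names are the editor's invention, not part of the VMM.
 */
#if 0
static bool hmR0VmxSketchIsCtlComboSupported(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t val = fDisallowed0 | fDesired;     /* Bits set here must be set in the VMCS. */
    uint32_t zap = fAllowed1;                   /* Bits cleared here must be cleared in the VMCS. */
    return (val & zap) == val;                  /* False if fDesired contains an unsupported control. */
}
#endif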
2374
2375
2376/**
2377 * Sets up processor-based VM-execution controls in the VMCS.
2378 *
2379 * @returns VBox status code.
2380 * @param pVM Pointer to the VM.
2381 * @param pVCpu Pointer to the VMCPU.
2382 */
2383static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2384{
2385 AssertPtr(pVM);
2386 AssertPtr(pVCpu);
2387
2388 int rc = VERR_INTERNAL_ERROR_5;
2389 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2390 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2391
2392 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2393 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2394 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2395 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2396 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2397 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2398 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2399
2400 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; check that it is not -always- required to be set or cleared. */
2401 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2402 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2403 {
2404 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2405 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2406 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2407 }
2408
2409 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2410 if (!pVM->hm.s.fNestedPaging)
2411 {
2412 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2413 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2414 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2415 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2416 }
2417
2418 /* Use TPR shadowing if supported by the CPU. */
2419 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2420 {
2421 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2422 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2423 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2424 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2425 AssertRCReturn(rc, rc);
2426
2427 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2428 /* CR8 writes cause a VM-exit based on TPR threshold. */
2429 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2430 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2431 }
2432 else
2433 {
2434 /*
2435 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2436 * Set this control only for 64-bit guests.
2437 */
2438 if (pVM->hm.s.fAllow64BitGuests)
2439 {
2440 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2441 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2442 }
2443 }
2444
2445 /* Use MSR-bitmaps if supported by the CPU. */
2446 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2447 {
2448 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2449
2450 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2451 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2452 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2453 AssertRCReturn(rc, rc);
2454
2455 /*
2456 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2457 * automatically using dedicated fields in the VMCS.
2458 */
2459 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2460 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2461 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2462 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2463 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2464
2465#if HC_ARCH_BITS == 64
2466 /*
2467 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2468 */
2469 if (pVM->hm.s.fAllow64BitGuests)
2470 {
2471 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2472 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2473 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2474 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2475 }
2476#endif
2477 }
2478
2479 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2480 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2481 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2482
2483 if ((val & zap) != val)
2484 {
2485 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2486 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
2487 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2488 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2489 }
2490
2491 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
2492 AssertRCReturn(rc, rc);
2493
2494 pVCpu->hm.s.vmx.u32ProcCtls = val;
2495
2496 /*
2497 * Secondary processor-based VM-execution controls.
2498 */
2499 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2500 {
2501 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2502 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2503
2504 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2505 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
2506
2507 if (pVM->hm.s.fNestedPaging)
2508 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
2509 else
2510 {
2511 /*
2512 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
2513 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
2514 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
2515 */
2516 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2517 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2518 }
2519
2520 if (pVM->hm.s.vmx.fVpid)
2521 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
2522
2523 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2524 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
2525
2526 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
2527 /** @todo VIRT_X2APIC support; it's mutually exclusive with this, so it must be
2528 * done dynamically. */
2529 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2530 {
2531 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2532 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2533 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2534 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2535 AssertRCReturn(rc, rc);
2536 }
2537
2538 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2539 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
2540
2541 if ((val & zap) != val)
2542 {
2543 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
2544 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
2545 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2546 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2547 }
2548
2549 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
2550 AssertRCReturn(rc, rc);
2551
2552 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2553 }
2554 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2555 {
2556 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest enabled when secondary processor-based VM-execution controls are not "
2557 "available\n"));
2558 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2559 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2560 }
2561
2562 return VINF_SUCCESS;
2563}
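
/*
 * Illustrative sketch (not compiled): how an additional MSR could be given passthru or intercepted
 * access via the MSR bitmap, using hmR0VmxSetMsrPermission() exactly as hmR0VmxSetupProcCtls() does
 * above. The MSR chosen here (MSR_K8_TSC_AUX) is only an example of the mechanism and is not
 * necessarily handled this way by this file; the helper name is made up.
 */
#if 0
static void hmR0VmxSketchMsrPermissions(PVMCPU pVCpu)
{
    /* Let the guest read and write the MSR directly, without VM-exits. */
    hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);

    /* Or: intercept both access types so the VMM sees every RDMSR/WRMSR of it. */
    hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
}
#endif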
2564
2565
2566/**
2567 * Sets up miscellaneous (everything other than Pin & Processor-based
2568 * VM-execution) control fields in the VMCS.
2569 *
2570 * @returns VBox status code.
2571 * @param pVM Pointer to the VM.
2572 * @param pVCpu Pointer to the VMCPU.
2573 */
2574static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2575{
2576 NOREF(pVM);
2577 AssertPtr(pVM);
2578 AssertPtr(pVCpu);
2579
2580 int rc = VERR_GENERAL_FAILURE;
2581
2582 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2583#if 0
2584 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
2585 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
2586 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
2587
2588 /*
2589 * Set MASK & MATCH to 0. VMX checks whether GuestPFErrCode & MASK == MATCH. If they are equal (in our case they always are)
2590 * and the X86_XCPT_PF bit in the exception bitmap is set, a VM-exit occurs; if the bit is clear, no exit occurs.
2591 * We thus use the exception bitmap to control this rather than using both mechanisms.
2592 */
2593 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
2594 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
2595
2596 /** @todo Explore possibility of using IO-bitmaps. */
2597 /* All I/O instructions (including string I/O) cause VM-exits. */
2598 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
2599 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
2600
2601 /* Initialize the MSR-bitmap area. */
2602 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2603 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
2604 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
2605#endif
2606
2607 /* Setup MSR auto-load/store area. */
2608 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2609 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2610 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2611 AssertRCReturn(rc, rc);
2612 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2613 AssertRCReturn(rc, rc);
2614
2615 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2616 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2617 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2618 AssertRCReturn(rc, rc);
2619
2620 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2621 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2622 AssertRCReturn(rc, rc);
2623
2624 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2625#if 0
2626 /* Setup debug controls */
2627 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
2628 AssertRCReturn(rc, rc);
2629 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2630 AssertRCReturn(rc, rc);
2631#endif
2632
2633 return rc;
2634}
2635
2636
2637/**
2638 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2639 *
2640 * @returns VBox status code.
2641 * @param pVM Pointer to the VM.
2642 * @param pVCpu Pointer to the VMCPU.
2643 */
2644static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2645{
2646 AssertPtr(pVM);
2647 AssertPtr(pVCpu);
2648
2649 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2650
2651 uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
2652
2653 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2654 if (!pVM->hm.s.fNestedPaging)
2655 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2656
2657 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2658 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2659 AssertRCReturn(rc, rc);
2660 return rc;
2661}
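
/*
 * Illustrative sketch (not compiled): how further exceptions would be added to the exception bitmap
 * set up above. Intercepting #DB and #GP here is only an example of the mechanism, not something
 * this file necessarily does unconditionally; the helper name is made up.
 */
#if 0
static int hmR0VmxSketchTrapMoreXcpts(PVMCPU pVCpu)
{
    uint32_t u32XcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
    u32XcptBitmap |= RT_BIT(X86_XCPT_DB)            /* VM-exit on guest #DB. */
                   | RT_BIT(X86_XCPT_GP);           /* VM-exit on guest #GP. */
    pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
    return VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
}
#endif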
2662
2663
2664/**
2665 * Sets up the initial guest-state mask. The guest-state mask is consulted
2666 * before reading guest-state fields from the VMCS as VMREADs can be expensive
2667 * before reading guest-state fields from the VMCS, as VMREADs can be expensive
2668 * in the nested virtualization case (they would cause VM-exits).
2669 * @param pVCpu Pointer to the VMCPU.
2670 */
2671static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2672{
2673 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2674 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2675 return VINF_SUCCESS;
2676}
2677
2678
2679/**
2680 * Does per-VM VT-x initialization.
2681 *
2682 * @returns VBox status code.
2683 * @param pVM Pointer to the VM.
2684 */
2685VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2686{
2687 LogFlowFunc(("pVM=%p\n", pVM));
2688
2689 int rc = hmR0VmxStructsAlloc(pVM);
2690 if (RT_FAILURE(rc))
2691 {
2692 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2693 return rc;
2694 }
2695
2696 return VINF_SUCCESS;
2697}
2698
2699
2700/**
2701 * Does per-VM VT-x termination.
2702 *
2703 * @returns VBox status code.
2704 * @param pVM Pointer to the VM.
2705 */
2706VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2707{
2708 LogFlowFunc(("pVM=%p\n", pVM));
2709
2710#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2711 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2712 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2713#endif
2714 hmR0VmxStructsFree(pVM);
2715 return VINF_SUCCESS;
2716}
2717
2718
2719/**
2720 * Sets up the VM for execution under VT-x.
2721 * This function is only called once per-VM during initialization.
2722 *
2723 * @returns VBox status code.
2724 * @param pVM Pointer to the VM.
2725 */
2726VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2727{
2728 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2729 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2730
2731 LogFlowFunc(("pVM=%p\n", pVM));
2732
2733 /*
2734 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2735 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel().
2736 */
2737 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2738 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2739 || !pVM->hm.s.vmx.pRealModeTSS))
2740 {
2741 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2742 return VERR_INTERNAL_ERROR;
2743 }
2744
2745#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2746 /*
2747 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2748 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2749 */
2750 if ( pVM->hm.s.fAllow64BitGuests
2751 && !HMVMX_IS_64BIT_HOST_MODE())
2752 {
2753 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2754 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2755 }
2756#endif
2757
2758 /* Initialize these always, see hmR3InitFinalizeR0().*/
2759 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NONE;
2760 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
2761
2762 /* Setup the tagged-TLB flush handlers. */
2763 int rc = hmR0VmxSetupTaggedTlb(pVM);
2764 if (RT_FAILURE(rc))
2765 {
2766 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2767 return rc;
2768 }
2769
2770 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2771 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2772#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2773 if ( HMVMX_IS_64BIT_HOST_MODE()
2774 && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2775 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2776 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2777 {
2778 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2779 }
2780#endif
2781
2782 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2783 {
2784 PVMCPU pVCpu = &pVM->aCpus[i];
2785 AssertPtr(pVCpu);
2786 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2787
2788 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2789 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2790
2791 /* Initialize the VM-exit history array with end-of-array markers (UINT16_MAX). */
2792 Assert(!pVCpu->hm.s.idxExitHistoryFree);
2793 HMCPU_EXIT_HISTORY_RESET(pVCpu);
2794
2795 /* Set revision dword at the beginning of the VMCS structure. */
2796 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2797
2798 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2799 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2800 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2801 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2802
2803 /* Load this VMCS as the current VMCS. */
2804 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2805 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2806 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2807
2808 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2809 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2810 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2811
2812 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2813 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2814 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2815
2816 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2817 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2818 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2819
2820 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2821 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2822 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2823
2824 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2825 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2826 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2827
2828#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2829 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2830 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2831 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2832#endif
2833
2834 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2835 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2836 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2837 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2838
2839 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2840
2841 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2842 }
2843
2844 return VINF_SUCCESS;
2845}
2846
2847
2848/**
2849 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2850 * the VMCS.
2851 *
2852 * @returns VBox status code.
2853 * @param pVM Pointer to the VM.
2854 * @param pVCpu Pointer to the VMCPU.
2855 */
2856DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2857{
2858 NOREF(pVM); NOREF(pVCpu);
2859
2860 RTCCUINTREG uReg = ASMGetCR0();
2861 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2862 AssertRCReturn(rc, rc);
2863
2864#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2865 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2866 if (HMVMX_IS_64BIT_HOST_MODE())
2867 {
2868 uint64_t uRegCR3 = HMR0Get64bitCR3();
2869 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2870 }
2871 else
2872#endif
2873 {
2874 uReg = ASMGetCR3();
2875 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2876 }
2877 AssertRCReturn(rc, rc);
2878
2879 uReg = ASMGetCR4();
2880 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2881 AssertRCReturn(rc, rc);
2882 return rc;
2883}
2884
2885
2886#if HC_ARCH_BITS == 64
2887/**
2888 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2889 * requirements. See hmR0VmxSaveHostSegmentRegs().
2890 */
2891# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2892 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2893 { \
2894 bool fValidSelector = true; \
2895 if ((selValue) & X86_SEL_LDT) \
2896 { \
2897 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2898 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2899 } \
2900 if (fValidSelector) \
2901 { \
2902 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2903 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2904 } \
2905 (selValue) = 0; \
2906 }
2907#endif
2908
2909
2910/**
2911 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2912 * the host-state area in the VMCS.
2913 *
2914 * @returns VBox status code.
2915 * @param pVM Pointer to the VM.
2916 * @param pVCpu Pointer to the VMCPU.
2917 */
2918DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2919{
2920 int rc = VERR_INTERNAL_ERROR_5;
2921
2922#if HC_ARCH_BITS == 64
2923 /*
2924 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2925 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
2926 */
2927 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2928 ("Re-saving host-state after executing guest code without leaving VT-x!\n"), VERR_WRONG_ORDER);
2929#endif
2930
2931 /*
2932 * Host DS, ES, FS and GS segment registers.
2933 */
2934#if HC_ARCH_BITS == 64
2935 RTSEL uSelDS = ASMGetDS();
2936 RTSEL uSelES = ASMGetES();
2937 RTSEL uSelFS = ASMGetFS();
2938 RTSEL uSelGS = ASMGetGS();
2939#else
2940 RTSEL uSelDS = 0;
2941 RTSEL uSelES = 0;
2942 RTSEL uSelFS = 0;
2943 RTSEL uSelGS = 0;
2944#endif
2945
2946 /* Recalculate which host-state bits need to be manually restored. */
2947 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2948
2949 /*
2950 * Host CS and SS segment registers.
2951 */
2952#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2953 RTSEL uSelCS;
2954 RTSEL uSelSS;
2955 if (HMVMX_IS_64BIT_HOST_MODE())
2956 {
2957 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2958 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2959 }
2960 else
2961 {
2962 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2963 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2964 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2965 }
2966#else
2967 RTSEL uSelCS = ASMGetCS();
2968 RTSEL uSelSS = ASMGetSS();
2969#endif
2970
2971 /*
2972 * Host TR segment register.
2973 */
2974 RTSEL uSelTR = ASMGetTR();
2975
2976#if HC_ARCH_BITS == 64
2977 /*
2978 * Determine if the host segment registers are suitable for VT-x. Otherwise load zero to satisfy the VM-entry checks and
2979 * restore the originals before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2980 */
2981 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2982 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2983 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2984 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2985# undef VMXLOCAL_ADJUST_HOST_SEG
2986#endif
2987
2988 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2989 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2990 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2991 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2992 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2993 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2994 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2995 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2996 Assert(uSelCS);
2997 Assert(uSelTR);
2998
2999    /* The assertion is right, but we would not have updated u32ExitCtls yet at this point. */
3000#if 0
3001 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
3002 Assert(uSelSS != 0);
3003#endif
3004
3005 /* Write these host selector fields into the host-state area in the VMCS. */
3006 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
3007 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
3008#if HC_ARCH_BITS == 64
3009 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
3010 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
3011 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
3012 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
3013#endif
3014 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
3015
3016 /*
3017 * Host GDTR and IDTR.
3018 */
3019 RTGDTR Gdtr;
3020 RT_ZERO(Gdtr);
3021#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3022 if (HMVMX_IS_64BIT_HOST_MODE())
3023 {
3024 X86XDTR64 Gdtr64;
3025 X86XDTR64 Idtr64;
3026 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
3027 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
3028 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
3029
3030 Gdtr.cbGdt = Gdtr64.cb;
3031 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
3032 }
3033 else
3034#endif
3035 {
3036 RTIDTR Idtr;
3037 ASMGetGDTR(&Gdtr);
3038 ASMGetIDTR(&Idtr);
3039 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
3040 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
3041
3042#if HC_ARCH_BITS == 64
3043 /*
3044         * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
3045 * maximum limit (0xffff) on every VM-exit.
3046 */
3047 if (Gdtr.cbGdt != 0xffff)
3048 {
3049 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3050 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3051 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3052 }
3053
3054 /*
3055 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
3056 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit as 0xfff, VT-x
3057         * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insist
3058         * on 0xfff being the limit (Windows Patch Guard) or use the limit for other purposes (darwin puts the CPU ID in there
3059 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on
3060 * hosts where we are pretty sure it won't cause trouble.
3061 */
3062# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3063 if (Idtr.cbIdt < 0x0fff)
3064# else
3065 if (Idtr.cbIdt != 0xffff)
3066# endif
3067 {
3068 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3069 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3070 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3071 }
3072#endif
3073 }
3074
3075 /*
3076 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
3077 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
3078 */
3079 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
3080 ("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt),
3081 VERR_VMX_INVALID_HOST_STATE);
3082
3083 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
3084#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3085 if (HMVMX_IS_64BIT_HOST_MODE())
3086 {
3087 /* We need the 64-bit TR base for hybrid darwin. */
3088 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
3089 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
3090 }
3091 else
3092#endif
3093 {
3094 uintptr_t uTRBase;
3095#if HC_ARCH_BITS == 64
3096 uTRBase = X86DESC64_BASE(pDesc);
3097
3098 /*
3099 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
3100 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
3101 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
3102 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3103 *
3104 * [1] See Intel spec. 3.5 "System Descriptor Types".
3105 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3106 */
3107 Assert(pDesc->System.u4Type == 11);
3108 if ( pDesc->System.u16LimitLow != 0x67
3109 || pDesc->System.u4LimitHigh)
3110 {
3111 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3112 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
3113 if (pVM->hm.s.uHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
3114 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3115 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3116
3117 /* Store the GDTR here as we need it while restoring TR. */
3118 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3119 }
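        /*
         * Illustrative note (an addition, not from the original sources): a host TSS
         * limit larger than 0x67 is common when the host kernel exposes an I/O
         * permission bitmap, in which case the restore path above is taken and the
         * GDTR snapshot is kept so the TR descriptor can be patched back lazily on
         * the way back to ring-3.
         */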
3120#else
3121 uTRBase = X86DESC_BASE(pDesc);
3122#endif
3123 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3124 }
3125 AssertRCReturn(rc, rc);
3126
3127 /*
3128 * Host FS base and GS base.
3129 */
3130#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3131 if (HMVMX_IS_64BIT_HOST_MODE())
3132 {
3133 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3134 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3135 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
3136 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
3137
3138# if HC_ARCH_BITS == 64
3139 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3140 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3141 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3142 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3143 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3144# endif
3145 }
3146#endif
3147 return rc;
3148}
3149
3150
3151/**
3152 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
3153 * host-state area of the VMCS. These MSRs will be automatically restored on
3154 * the host after every successful VM-exit.
3155 *
3156 * @returns VBox status code.
3157 * @param pVM Pointer to the VM.
3158 * @param pVCpu Pointer to the VMCPU.
3159 *
3160 * @remarks No-long-jump zone!!!
3161 */
3162DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3163{
3164 NOREF(pVM);
3165
3166 AssertPtr(pVCpu);
3167 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3168
3169 int rc = VINF_SUCCESS;
3170#if HC_ARCH_BITS == 64
3171 if (pVM->hm.s.fAllow64BitGuests)
3172 hmR0VmxLazySaveHostMsrs(pVCpu);
3173#endif
3174
3175 /*
3176 * Host Sysenter MSRs.
3177 */
3178 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3179 AssertRCReturn(rc, rc);
3180#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
3181 if (HMVMX_IS_64BIT_HOST_MODE())
3182 {
3183 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3184 AssertRCReturn(rc, rc);
3185 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3186 }
3187 else
3188 {
3189 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3190 AssertRCReturn(rc, rc);
3191 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3192 }
3193#elif HC_ARCH_BITS == 32
3194 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3195 AssertRCReturn(rc, rc);
3196 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3197#else
3198 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3199 AssertRCReturn(rc, rc);
3200 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3201#endif
3202 AssertRCReturn(rc, rc);
3203
3204 /*
3205 * Host EFER MSR.
3206 * If the CPU supports the newer VMCS controls for managing EFER, use it.
3207 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
3208 */
3209 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3210 {
3211 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3212 AssertRCReturn(rc, rc);
3213 }
3214
3215 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
3216 * hmR0VmxLoadGuestExitCtls() !! */
3217
3218 return rc;
3219}
3220
3221
3222/**
3223 * Figures out if we need to swap the EFER MSR which is
3224 * particularly expensive.
3225 *
3226 * We check all relevant bits. For now, that's everything
3227 * besides LMA/LME, as these two bits are handled by VM-entry,
3228 * see hmR0VmxLoadGuestExitCtls() and
3229 * hmR0VmxLoadGuestEntryCtls().
3230 *
3231 * @returns true if we need to load guest EFER, false otherwise.
3232 * @param pVCpu Pointer to the VMCPU.
3233 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3234 * out-of-sync. Make sure to update the required fields
3235 * before using them.
3236 *
3237 * @remarks Requires EFER, CR4.
3238 * @remarks No-long-jump zone!!!
3239 */
3240static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3241{
3242#ifdef HMVMX_ALWAYS_SWAP_EFER
3243 return true;
3244#endif
3245
3246#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3247 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3248 if (CPUMIsGuestInLongMode(pVCpu))
3249 return false;
3250#endif
3251
3252 PVM pVM = pVCpu->CTX_SUFF(pVM);
3253 uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3254 uint64_t u64GuestEfer = pMixedCtx->msrEFER;
3255
3256 /*
3257 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
3258 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
3259 */
3260 if ( CPUMIsGuestInLongMode(pVCpu)
3261 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3262 {
3263 return true;
3264 }
3265
3266 /*
3267 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
3268 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3269 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3270 */
3271 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3272 && (pMixedCtx->cr0 & X86_CR0_PG)
3273 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3274 {
3275 /* Assert that host is PAE capable. */
3276 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3277 return true;
3278 }
3279
3280 /** @todo Check the latest Intel spec. for any other bits,
3281 * like SMEP/SMAP? */
3282 return false;
3283}
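/*
 * Illustrative example (an addition, not from the original sources): assume a
 * 64-bit host with EFER = SCE|LME|LMA|NXE and a 64-bit guest whose EFER has NXE
 * clear. With CR4.PAE and CR0.PG set in the guest, the NXE mismatch above makes
 * hmR0VmxShouldSwapEferMsr() return true, so the swap is armed either via the
 * VMCS EFER controls (see hmR0VmxLoadGuestEntryCtls()/hmR0VmxLoadGuestExitCtls())
 * or via the auto-load/store MSR area.
 */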
3284
3285
3286/**
3287 * Sets up VM-entry controls in the VMCS. These controls can affect things done
3288 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
3289 * controls".
3290 *
3291 * @returns VBox status code.
3292 * @param pVCpu Pointer to the VMCPU.
3293 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3294 * out-of-sync. Make sure to update the required fields
3295 * before using them.
3296 *
3297 * @remarks Requires EFER.
3298 * @remarks No-long-jump zone!!!
3299 */
3300DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3301{
3302 int rc = VINF_SUCCESS;
3303 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3304 {
3305 PVM pVM = pVCpu->CTX_SUFF(pVM);
3306 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3307 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3308
3309         /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3310 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3311
3312 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3313 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3314 {
3315 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3316 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
3317 }
3318 else
3319 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3320
3321 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
3322 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3323 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3324 {
3325 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3326 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
3327 }
3328
3329 /*
3330 * The following should -not- be set (since we're not in SMM mode):
3331 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3332 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3333 */
3334
3335 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3336 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3337
3338 if ((val & zap) != val)
3339 {
3340 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3341 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
3342 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3343 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3344 }
3345
3346 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
3347 AssertRCReturn(rc, rc);
3348
3349 pVCpu->hm.s.vmx.u32EntryCtls = val;
3350 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3351 }
3352 return rc;
3353}
3354
3355
3356/**
3357 * Sets up the VM-exit controls in the VMCS.
3358 *
3359 * @returns VBox status code.
3360 * @param pVM Pointer to the VM.
3361 * @param pVCpu Pointer to the VMCPU.
3362 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3363 * out-of-sync. Make sure to update the required fields
3364 * before using them.
3365 *
3366 * @remarks Requires EFER.
3367 */
3368DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3369{
3370 NOREF(pMixedCtx);
3371
3372 int rc = VINF_SUCCESS;
3373 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3374 {
3375 PVM pVM = pVCpu->CTX_SUFF(pVM);
3376 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3377 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3378
3379 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3380 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3381
3382 /*
3383 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3384 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
3385 */
3386#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3387 if (HMVMX_IS_64BIT_HOST_MODE())
3388 {
3389 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3390 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3391 }
3392 else
3393 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3394#else
3395 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3396 {
3397 /* The switcher returns to long mode, EFER is managed by the switcher. */
3398 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3399 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3400 }
3401 else
3402 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3403#endif /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
3404
3405         /* If the newer VMCS fields for managing EFER exist, use them. */
3406 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3407 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3408 {
3409 val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3410 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3411 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
3412 }
3413
3414 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3415 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3416
3417 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3418 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3419 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3420
3421 if ( pVM->hm.s.vmx.fUsePreemptTimer
3422 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
3423 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3424
3425 if ((val & zap) != val)
3426 {
3427             LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3428 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
3429 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3430 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3431 }
3432
3433 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
3434 AssertRCReturn(rc, rc);
3435
3436 pVCpu->hm.s.vmx.u32ExitCtls = val;
3437 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3438 }
3439 return rc;
3440}
3441
3442
3443/**
3444 * Loads the guest APIC and related state.
3445 *
3446 * @returns VBox status code.
3447 * @param pVM Pointer to the VM.
3448 * @param pVCpu Pointer to the VMCPU.
3449 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3450 * out-of-sync. Make sure to update the required fields
3451 * before using them.
3452 */
3453DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3454{
3455 NOREF(pMixedCtx);
3456
3457 int rc = VINF_SUCCESS;
3458 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3459 {
3460 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
3461 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3462 {
3463 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3464
3465 bool fPendingIntr = false;
3466 uint8_t u8Tpr = 0;
3467 uint8_t u8PendingIntr = 0;
3468 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3469 AssertRCReturn(rc, rc);
3470
3471 /*
3472 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
3473 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
3474 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
3475 * the interrupt when we VM-exit for other reasons.
3476 */
3477 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3478 uint32_t u32TprThreshold = 0;
3479 if (fPendingIntr)
3480 {
3481 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3482 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
3483 const uint8_t u8TprPriority = (u8Tpr >> 4) & 0xf;
3484 if (u8PendingPriority <= u8TprPriority)
3485 u32TprThreshold = u8PendingPriority;
3486 else
3487 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
3488 }
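            /*
             * Worked example (an addition, not from the original sources): a pending
             * vector of 0x51 gives a pending priority of 5 and a TPR of 0x80 gives a
             * TPR priority of 8; since 5 <= 8 the threshold becomes 5, so VT-x exits
             * as soon as the guest lowers its TPR class below 5 and the interrupt can
             * be delivered.
             */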
3489 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3490
3491 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3492 AssertRCReturn(rc, rc);
3493 }
3494
3495 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3496 }
3497 return rc;
3498}
3499
3500
3501/**
3502 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3503 *
3504 * @returns Guest's interruptibility-state.
3505 * @param pVCpu Pointer to the VMCPU.
3506 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3507 * out-of-sync. Make sure to update the required fields
3508 * before using them.
3509 *
3510 * @remarks No-long-jump zone!!!
3511 */
3512DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3513{
3514 /*
3515 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3516 */
3517 uint32_t uIntrState = 0;
3518 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3519 {
3520 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
3521 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3522 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3523 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3524 {
3525 if (pMixedCtx->eflags.Bits.u1IF)
3526 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3527 else
3528 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3529 }
3530 /* else: Although we can clear the force-flag here, let's keep this side-effects free. */
3531 }
3532
3533 /*
3534 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3535 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3536 * setting this would block host-NMIs and IRET will not clear the blocking.
3537 *
3538 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3539 */
3540 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3541 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3542 {
3543 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3544 }
3545
3546 return uIntrState;
3547}
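/*
 * Descriptive note (an addition, not from the original sources): the value returned
 * here is the raw interruptibility-state field, i.e. bit 0 (blocking by STI) or
 * bit 1 (blocking by MOV SS) plus optionally bit 3 (blocking by NMI); it is consumed
 * by hmR0VmxLoadGuestIntrState() below, which writes it into the VMCS.
 */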
3548
3549
3550/**
3551 * Loads the guest's interruptibility-state into the guest-state area in the
3552 * VMCS.
3553 *
3554 * @returns VBox status code.
3555 * @param pVCpu Pointer to the VMCPU.
3556 * @param uIntrState The interruptibility-state to set.
3557 */
3558static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3559{
3560 NOREF(pVCpu);
3561 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
3562 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3563 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
3564 AssertRCReturn(rc, rc);
3565 return rc;
3566}
3567
3568
3569/**
3570 * Loads the exception intercepts required for guest execution in the VMCS.
3571 *
3572 * @returns VBox status code.
3573 * @param pVCpu Pointer to the VMCPU.
3574 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3575 * out-of-sync. Make sure to update the required fields
3576 * before using them.
3577 */
3578static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3579{
3580 NOREF(pMixedCtx);
3581 int rc = VINF_SUCCESS;
3582 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
3583 {
3584 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
3585 if (pVCpu->hm.s.fGIMTrapXcptUD)
3586 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
3587 else
3588 {
3589#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3590 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3591#endif
3592 }
3593
3594 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3595 AssertRCReturn(rc, rc);
3596
3597 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3598 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
3599 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
3600 }
3601 return rc;
3602}
3603
3604
3605/**
3606 * Loads the guest's RIP into the guest-state area in the VMCS.
3607 *
3608 * @returns VBox status code.
3609 * @param pVCpu Pointer to the VMCPU.
3610 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3611 * out-of-sync. Make sure to update the required fields
3612 * before using them.
3613 *
3614 * @remarks No-long-jump zone!!!
3615 */
3616static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3617{
3618 int rc = VINF_SUCCESS;
3619 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3620 {
3621 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3622 AssertRCReturn(rc, rc);
3623
3624 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3625 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
3626 HMCPU_CF_VALUE(pVCpu)));
3627 }
3628 return rc;
3629}
3630
3631
3632/**
3633 * Loads the guest's RSP into the guest-state area in the VMCS.
3634 *
3635 * @returns VBox status code.
3636 * @param pVCpu Pointer to the VMCPU.
3637 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3638 * out-of-sync. Make sure to update the required fields
3639 * before using them.
3640 *
3641 * @remarks No-long-jump zone!!!
3642 */
3643static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3644{
3645 int rc = VINF_SUCCESS;
3646 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3647 {
3648 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3649 AssertRCReturn(rc, rc);
3650
3651 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3652 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
3653 }
3654 return rc;
3655}
3656
3657
3658/**
3659 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
3660 *
3661 * @returns VBox status code.
3662 * @param pVCpu Pointer to the VMCPU.
3663 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3664 * out-of-sync. Make sure to update the required fields
3665 * before using them.
3666 *
3667 * @remarks No-long-jump zone!!!
3668 */
3669static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3670{
3671 int rc = VINF_SUCCESS;
3672 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3673 {
3674 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3675 Let us assert it as such and use 32-bit VMWRITE. */
3676 Assert(!(pMixedCtx->rflags.u64 >> 32));
3677 X86EFLAGS Eflags = pMixedCtx->eflags;
3678 /** @todo r=bird: There shall be no need to OR in X86_EFL_1 here, nor
3679 * shall there be any reason for clearing bits 63:22, 15, 5 and 3.
3680 * These will never be cleared/set, unless some other part of the VMM
3681         * code is buggy - in which case we're better off finding and fixing
3682 * those bugs than hiding them. */
3683 Assert(Eflags.u32 & X86_EFL_RA1_MASK);
3684 Assert(!(Eflags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3685 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
3686 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
3687
3688 /*
3689 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
3690 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
3691 */
3692 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3693 {
3694 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3695 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3696 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3697 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3698 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3699 }
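        /*
         * Worked example (an addition, not from the original sources): a real-mode guest
         * eflags value of 0x00003202 (IF set, IOPL=3) is saved as-is in RealMode.Eflags,
         * then IOPL is cleared and the VM bit (bit 17) is set, so 0x00020202 is what
         * actually gets written to VMX_VMCS_GUEST_RFLAGS below.
         */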
3700
3701 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
3702 AssertRCReturn(rc, rc);
3703
3704 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3705 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
3706 }
3707 return rc;
3708}
3709
3710
3711/**
3712 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
3713 *
3714 * @returns VBox status code.
3715 * @param pVCpu Pointer to the VMCPU.
3716 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3717 * out-of-sync. Make sure to update the required fields
3718 * before using them.
3719 *
3720 * @remarks No-long-jump zone!!!
3721 */
3722DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3723{
3724 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3725 AssertRCReturn(rc, rc);
3726 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3727 AssertRCReturn(rc, rc);
3728 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3729 AssertRCReturn(rc, rc);
3730 return rc;
3731}
3732
3733
3734/**
3735 * Loads the guest CR0 control register into the guest-state area in the VMCS.
3736 * CR0 is partially shared with the host and we have to consider the FPU bits.
3737 *
3738 * @returns VBox status code.
3739 * @param pVM Pointer to the VM.
3740 * @param pVCpu Pointer to the VMCPU.
3741 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3742 * out-of-sync. Make sure to update the required fields
3743 * before using them.
3744 *
3745 * @remarks No-long-jump zone!!!
3746 */
3747static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3748{
3749 /*
3750 * Guest CR0.
3751 * Guest FPU.
3752 */
3753 int rc = VINF_SUCCESS;
3754 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3755 {
3756 Assert(!(pMixedCtx->cr0 >> 32));
3757 uint32_t u32GuestCR0 = pMixedCtx->cr0;
3758 PVM pVM = pVCpu->CTX_SUFF(pVM);
3759
3760 /* The guest's view (read access) of its CR0 is unblemished. */
3761 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
3762 AssertRCReturn(rc, rc);
3763 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
3764
3765 /* Setup VT-x's view of the guest CR0. */
3766 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
3767 if (pVM->hm.s.fNestedPaging)
3768 {
3769 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3770 {
3771 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3772 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3773 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3774 }
3775 else
3776 {
3777 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3778 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3779 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3780 }
3781
3782 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3783 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3784 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3785
3786 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3787 AssertRCReturn(rc, rc);
3788 }
3789 else
3790 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3791
3792 /*
3793 * Guest FPU bits.
3794         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
3795         * CPUs to support VT-x; there is no mention of relaxing this for unrestricted guests (UX) in the VM-entry checks.
3796 */
3797 u32GuestCR0 |= X86_CR0_NE;
3798 bool fInterceptNM = false;
3799 if (CPUMIsGuestFPUStateActive(pVCpu))
3800 {
3801 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3802             /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
3803 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3804 }
3805 else
3806 {
3807 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3808 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3809 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3810 }
3811
3812 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3813 bool fInterceptMF = false;
3814 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3815 fInterceptMF = true;
3816
3817 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3818 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3819 {
3820 Assert(PDMVmmDevHeapIsEnabled(pVM));
3821 Assert(pVM->hm.s.vmx.pRealModeTSS);
3822 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3823 fInterceptNM = true;
3824 fInterceptMF = true;
3825 }
3826 else
3827 {
3828 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626} comment #11. */
3829 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3830 }
3831 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3832
3833 if (fInterceptNM)
3834 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3835 else
3836 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3837
3838 if (fInterceptMF)
3839 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3840 else
3841 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3842
3843 /* Additional intercepts for debugging, define these yourself explicitly. */
3844#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3845 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3846 | RT_BIT(X86_XCPT_BP)
3847 | RT_BIT(X86_XCPT_DB)
3848 | RT_BIT(X86_XCPT_DE)
3849 | RT_BIT(X86_XCPT_NM)
3850 | RT_BIT(X86_XCPT_TS)
3851 | RT_BIT(X86_XCPT_UD)
3852 | RT_BIT(X86_XCPT_NP)
3853 | RT_BIT(X86_XCPT_SS)
3854 | RT_BIT(X86_XCPT_GP)
3855 | RT_BIT(X86_XCPT_PF)
3856 | RT_BIT(X86_XCPT_MF)
3857 ;
3858#elif defined(HMVMX_ALWAYS_TRAP_PF)
3859 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3860#endif
3861
3862 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3863
3864 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3865 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3866 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3867 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3868 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3869 else
3870 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3871
3872 u32GuestCR0 |= uSetCR0;
3873 u32GuestCR0 &= uZapCR0;
3874 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
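        /*
         * Illustrative note (an addition, not from the original sources; the values are
         * typical rather than guaranteed): with Cr0Fixed0 = 0x80000021 (PG|NE|PE) and
         * Cr0Fixed1 = 0xffffffff, uSetCR0 = 0x80000021 and uZapCR0 = 0xffffffff, i.e.
         * PG, NE and PE are forced on (unless unrestricted guest relaxes PE/PG above)
         * and no bits are forced off here.
         */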
3875
3876 /* Write VT-x's view of the guest CR0 into the VMCS. */
3877 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3878 AssertRCReturn(rc, rc);
3879 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
3880 uZapCR0));
3881
3882 /*
3883 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3884 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3885 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3886 */
3887 uint32_t u32CR0Mask = 0;
3888 u32CR0Mask = X86_CR0_PE
3889 | X86_CR0_NE
3890 | X86_CR0_WP
3891 | X86_CR0_PG
3892 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3893 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3894 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3895
3896 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3897 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3898 * and @bugref{6944}. */
3899#if 0
3900 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3901 u32CR0Mask &= ~X86_CR0_PE;
3902#endif
3903 if (pVM->hm.s.fNestedPaging)
3904 u32CR0Mask &= ~X86_CR0_WP;
3905
3906 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3907 if (fInterceptNM)
3908 {
3909 u32CR0Mask |= X86_CR0_TS
3910 | X86_CR0_MP;
3911 }
3912
3913 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3914 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3915 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3916 AssertRCReturn(rc, rc);
3917 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
3918
3919 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3920 }
3921 return rc;
3922}
3923
3924
3925/**
3926 * Loads the guest control registers (CR3, CR4) into the guest-state area
3927 * in the VMCS.
3928 *
3929 * @returns VBox status code.
3930 * @param pVM Pointer to the VM.
3931 * @param pVCpu Pointer to the VMCPU.
3932 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3933 * out-of-sync. Make sure to update the required fields
3934 * before using them.
3935 *
3936 * @remarks No-long-jump zone!!!
3937 */
3938static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3939{
3940 int rc = VINF_SUCCESS;
3941 PVM pVM = pVCpu->CTX_SUFF(pVM);
3942
3943 /*
3944 * Guest CR2.
3945 * It's always loaded in the assembler code. Nothing to do here.
3946 */
3947
3948 /*
3949 * Guest CR3.
3950 */
3951 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3952 {
3953 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3954 if (pVM->hm.s.fNestedPaging)
3955 {
3956 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3957
3958 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3959 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3960 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3961 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3962
3963 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3964 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3965 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
3966
3967 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3968 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3969 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 6:11 MBZ. */
3970 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3971
3972 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3973 AssertRCReturn(rc, rc);
3974 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
3975
3976 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3977 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3978 {
3979 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3980 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3981 {
3982 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3983 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3984 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3985 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3986 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3987 }
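                /*
                 * Descriptive note (an addition, not from the original sources): with EPT
                 * and PAE paging the CPU loads the four PDPTEs from the VMCS fields above
                 * on VM-entry rather than re-reading them from guest CR3, which is why
                 * PGMGstGetPaePdpes() is consulted here on every CR3 change.
                 */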
3988
3989 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3990 have Unrestricted Execution to handle the guest when it's not using paging. */
3991 GCPhysGuestCR3 = pMixedCtx->cr3;
3992 }
3993 else
3994 {
3995 /*
3996 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3997 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3998 * EPT takes care of translating it to host-physical addresses.
3999 */
4000 RTGCPHYS GCPhys;
4001 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
4002 Assert(PDMVmmDevHeapIsEnabled(pVM));
4003
4004 /* We obtain it here every time as the guest could have relocated this PCI region. */
4005 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
4006 AssertRCReturn(rc, rc);
4007
4008 GCPhysGuestCR3 = GCPhys;
4009 }
4010
4011 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
4012 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
4013 }
4014 else
4015 {
4016 /* Non-nested paging case, just use the hypervisor's CR3. */
4017 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
4018
4019 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
4020 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
4021 }
4022 AssertRCReturn(rc, rc);
4023
4024 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
4025 }
4026
4027 /*
4028 * Guest CR4.
4029     * ASSUMES this is done every time we get in from ring-3! (XCR0)
4030 */
4031 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
4032 {
4033 Assert(!(pMixedCtx->cr4 >> 32));
4034 uint32_t u32GuestCR4 = pMixedCtx->cr4;
4035
4036 /* The guest's view of its CR4 is unblemished. */
4037 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
4038 AssertRCReturn(rc, rc);
4039 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
4040
4041 /* Setup VT-x's view of the guest CR4. */
4042 /*
4043 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
4044 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
4045 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
4046 */
4047 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4048 {
4049 Assert(pVM->hm.s.vmx.pRealModeTSS);
4050 Assert(PDMVmmDevHeapIsEnabled(pVM));
4051 u32GuestCR4 &= ~X86_CR4_VME;
4052 }
4053
4054 if (pVM->hm.s.fNestedPaging)
4055 {
4056 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
4057 && !pVM->hm.s.vmx.fUnrestrictedGuest)
4058 {
4059 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
4060 u32GuestCR4 |= X86_CR4_PSE;
4061 /* Our identity mapping is a 32-bit page directory. */
4062 u32GuestCR4 &= ~X86_CR4_PAE;
4063 }
4064 /* else use guest CR4.*/
4065 }
4066 else
4067 {
4068 /*
4069             * The shadow paging modes and guest paging modes are different; the shadow is in accordance with the host
4070 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
4071 */
4072 switch (pVCpu->hm.s.enmShadowMode)
4073 {
4074 case PGMMODE_REAL: /* Real-mode. */
4075 case PGMMODE_PROTECTED: /* Protected mode without paging. */
4076 case PGMMODE_32_BIT: /* 32-bit paging. */
4077 {
4078 u32GuestCR4 &= ~X86_CR4_PAE;
4079 break;
4080 }
4081
4082 case PGMMODE_PAE: /* PAE paging. */
4083 case PGMMODE_PAE_NX: /* PAE paging with NX. */
4084 {
4085 u32GuestCR4 |= X86_CR4_PAE;
4086 break;
4087 }
4088
4089 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4090 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4091#ifdef VBOX_ENABLE_64_BITS_GUESTS
4092 break;
4093#endif
4094 default:
4095 AssertFailed();
4096 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4097 }
4098 }
4099
4100 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4101 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4102 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4103 u32GuestCR4 |= uSetCR4;
4104 u32GuestCR4 &= uZapCR4;
4105
4106 /* Write VT-x's view of the guest CR4 into the VMCS. */
4107 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
4108 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
4109 AssertRCReturn(rc, rc);
4110
4111         /* Setup CR4 mask. CR4 flags are owned by the host; if the guest attempts to change them, that causes a VM-exit. */
4112 uint32_t u32CR4Mask = X86_CR4_VME
4113 | X86_CR4_PAE
4114 | X86_CR4_PGE
4115 | X86_CR4_PSE
4116 | X86_CR4_VMXE;
4117 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
4118 u32CR4Mask |= X86_CR4_OSXSAVE;
4119 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
4120 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
4121 AssertRCReturn(rc, rc);
4122
4123 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
4124 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
4125
4126 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
4127 }
4128 return rc;
4129}
4130
4131
4132/**
4133 * Loads the guest debug registers into the guest-state area in the VMCS.
4134 * This also sets up whether #DB and MOV DRx accesses cause VM-exits.
4135 *
4136 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4137 *
4138 * @returns VBox status code.
4139 * @param pVCpu Pointer to the VMCPU.
4140 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4141 * out-of-sync. Make sure to update the required fields
4142 * before using them.
4143 *
4144 * @remarks No-long-jump zone!!!
4145 */
4146static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4147{
4148 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4149 return VINF_SUCCESS;
4150
4151#ifdef VBOX_STRICT
4152 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4153 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4154 {
4155 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4156 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4157 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4158 }
4159#endif
4160
4161 int rc;
4162 PVM pVM = pVCpu->CTX_SUFF(pVM);
4163 bool fInterceptDB = false;
4164 bool fInterceptMovDRx = false;
4165 if ( pVCpu->hm.s.fSingleInstruction
4166 || DBGFIsStepping(pVCpu))
4167 {
4168 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4169 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4170 {
4171 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4172 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4173 AssertRCReturn(rc, rc);
4174 Assert(fInterceptDB == false);
4175 }
4176 else
4177 {
4178 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4179 pVCpu->hm.s.fClearTrapFlag = true;
4180 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4181 fInterceptDB = true;
4182 }
4183 }
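    /*
     * Descriptive note (an addition, not from the original sources): when the CPU lacks
     * the monitor-trap-flag control, single stepping is emulated above by setting
     * EFL.TF in the guest context; the resulting #DB is intercepted (fInterceptDB) and
     * fClearTrapFlag ensures TF is stripped again once the step completes.
     */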
4184
4185 if ( fInterceptDB
4186 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4187 {
4188 /*
4189 * Use the combined guest and host DRx values found in the hypervisor
4190 * register set because the debugger has breakpoints active or someone
4191 * is single stepping on the host side without a monitor trap flag.
4192 *
4193 * Note! DBGF expects a clean DR6 state before executing guest code.
4194 */
4195#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4196 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4197 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4198 {
4199 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4200 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4201 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4202 }
4203 else
4204#endif
4205 if (!CPUMIsHyperDebugStateActive(pVCpu))
4206 {
4207 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4208 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4209 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4210 }
4211
4212 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
4213 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4214 AssertRCReturn(rc, rc);
4215
4216 pVCpu->hm.s.fUsingHyperDR7 = true;
4217 fInterceptDB = true;
4218 fInterceptMovDRx = true;
4219 }
4220 else
4221 {
4222 /*
4223 * If the guest has enabled debug registers, we need to load them prior to
4224 * executing guest code so they'll trigger at the right time.
4225 */
4226 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
4227 {
4228#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4229 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4230 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4231 {
4232 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4233 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4234 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4235 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4236 }
4237 else
4238#endif
4239 if (!CPUMIsGuestDebugStateActive(pVCpu))
4240 {
4241 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4242 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4243 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4244 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4245 }
4246 Assert(!fInterceptDB);
4247 Assert(!fInterceptMovDRx);
4248 }
4249 /*
4250 * If no debugging enabled, we'll lazy load DR0-3. Unlike on AMD-V, we
4251 * must intercept #DB in order to maintain a correct DR6 guest value.
4252 */
4253#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4254 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4255 && !CPUMIsGuestDebugStateActive(pVCpu))
4256#else
4257 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4258#endif
4259 {
4260 fInterceptMovDRx = true;
4261 fInterceptDB = true;
4262 }
4263
4264 /* Update guest DR7. */
4265 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
4266 AssertRCReturn(rc, rc);
4267
4268 pVCpu->hm.s.fUsingHyperDR7 = false;
4269 }
4270
4271 /*
4272 * Update the exception bitmap regarding intercepting #DB generated by the guest.
4273 */
4274 if ( fInterceptDB
4275 || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4276 {
4277 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
4278 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
4279 }
4280 else
4281 {
4282#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
4283 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
4284 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
4285#endif
4286 }
4287
4288 /*
4289 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
4290 */
4291 if (fInterceptMovDRx)
4292 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4293 else
4294 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4295 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4296 AssertRCReturn(rc, rc);
4297
4298 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4299 return VINF_SUCCESS;
4300}
4301
4302
4303#ifdef VBOX_STRICT
4304/**
4305 * Strict function to validate segment registers.
4306 *
4307 * @remarks ASSUMES CR0 is up to date.
4308 */
4309static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4310{
4311 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4312 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
4313 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
4314 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4315 && ( !CPUMIsGuestInRealModeEx(pCtx)
4316 && !CPUMIsGuestInV86ModeEx(pCtx)))
4317 {
4318 /* Protected mode checks */
4319 /* CS */
4320 Assert(pCtx->cs.Attr.n.u1Present);
4321 Assert(!(pCtx->cs.Attr.u & 0xf00));
4322 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4323 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4324 || !(pCtx->cs.Attr.n.u1Granularity));
4325 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4326 || (pCtx->cs.Attr.n.u1Granularity));
4327 /* CS cannot be loaded with NULL in protected mode. */
4328 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
4329 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4330 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4331 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4332 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4333 else
4334             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4335 /* SS */
4336 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4337 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4338 if ( !(pCtx->cr0 & X86_CR0_PE)
4339 || pCtx->cs.Attr.n.u4Type == 3)
4340 {
4341 Assert(!pCtx->ss.Attr.n.u2Dpl);
4342 }
4343 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4344 {
4345 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4346 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4347 Assert(pCtx->ss.Attr.n.u1Present);
4348 Assert(!(pCtx->ss.Attr.u & 0xf00));
4349 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4350 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4351 || !(pCtx->ss.Attr.n.u1Granularity));
4352 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4353 || (pCtx->ss.Attr.n.u1Granularity));
4354 }
4355 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
4356 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4357 {
4358 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4359 Assert(pCtx->ds.Attr.n.u1Present);
4360 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4361 Assert(!(pCtx->ds.Attr.u & 0xf00));
4362 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4363 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4364 || !(pCtx->ds.Attr.n.u1Granularity));
4365 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4366 || (pCtx->ds.Attr.n.u1Granularity));
4367 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4368 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4369 }
4370 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4371 {
4372 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4373 Assert(pCtx->es.Attr.n.u1Present);
4374 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4375 Assert(!(pCtx->es.Attr.u & 0xf00));
4376 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4377 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4378 || !(pCtx->es.Attr.n.u1Granularity));
4379 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4380 || (pCtx->es.Attr.n.u1Granularity));
4381 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4382 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4383 }
4384 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4385 {
4386 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4387 Assert(pCtx->fs.Attr.n.u1Present);
4388 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4389 Assert(!(pCtx->fs.Attr.u & 0xf00));
4390 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4391 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4392 || !(pCtx->fs.Attr.n.u1Granularity));
4393 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4394 || (pCtx->fs.Attr.n.u1Granularity));
4395 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4396 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4397 }
4398 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4399 {
4400 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4401 Assert(pCtx->gs.Attr.n.u1Present);
4402 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4403 Assert(!(pCtx->gs.Attr.u & 0xf00));
4404 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4405 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4406 || !(pCtx->gs.Attr.n.u1Granularity));
4407 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4408 || (pCtx->gs.Attr.n.u1Granularity));
4409 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4410 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4411 }
4412 /* 64-bit capable CPUs. */
4413# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4414 if (HMVMX_IS_64BIT_HOST_MODE())
4415 {
4416 Assert(!(pCtx->cs.u64Base >> 32));
4417 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
4418 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
4419 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
4420 }
4421# endif
4422 }
4423 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4424 || ( CPUMIsGuestInRealModeEx(pCtx)
4425 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4426 {
4427 /* Real and v86 mode checks. */
4428 /* hmR0VmxWriteSegmentReg() may modify the attributes it writes into the VMCS; we want to validate what we're actually feeding to VT-x. */
4429 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4430 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4431 {
4432 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
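/* 0xf3 = present, DPL=3, accessed read/write data segment -- the attributes the real-on-v86 hack loads into the VMCS. */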
4433 }
4434 else
4435 {
4436 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4437 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4438 }
4439
4440 /* CS */
4441 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 sel %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4442 Assert(pCtx->cs.u32Limit == 0xffff);
4443 Assert(u32CSAttr == 0xf3);
4444 /* SS */
4445 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4446 Assert(pCtx->ss.u32Limit == 0xffff);
4447 Assert(u32SSAttr == 0xf3);
4448 /* DS */
4449 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4450 Assert(pCtx->ds.u32Limit == 0xffff);
4451 Assert(u32DSAttr == 0xf3);
4452 /* ES */
4453 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4454 Assert(pCtx->es.u32Limit == 0xffff);
4455 Assert(u32ESAttr == 0xf3);
4456 /* FS */
4457 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4458 Assert(pCtx->fs.u32Limit == 0xffff);
4459 Assert(u32FSAttr == 0xf3);
4460 /* GS */
4461 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4462 Assert(pCtx->gs.u32Limit == 0xffff);
4463 Assert(u32GSAttr == 0xf3);
4464 /* 64-bit capable CPUs. */
4465# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4466 if (HMVMX_IS_64BIT_HOST_MODE())
4467 {
4468 Assert(!(pCtx->cs.u64Base >> 32));
4469 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
4470 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
4471 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
4472 }
4473# endif
4474 }
4475}
4476#endif /* VBOX_STRICT */
4477
4478
4479/**
4480 * Writes a guest segment register into the guest-state area in the VMCS.
4481 *
4482 * @returns VBox status code.
4483 * @param pVCpu Pointer to the VMCPU.
4484 * @param idxSel Index of the selector in the VMCS.
4485 * @param idxLimit Index of the segment limit in the VMCS.
4486 * @param idxBase Index of the segment base in the VMCS.
4487 * @param idxAccess Index of the access rights of the segment in the VMCS.
4488 * @param pSelReg Pointer to the segment selector.
4489 *
4490 * @remarks No-long-jump zone!!!
4491 */
4492static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4493 uint32_t idxAccess, PCPUMSELREG pSelReg)
4494{
4495 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4496 AssertRCReturn(rc, rc);
4497 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4498 AssertRCReturn(rc, rc);
4499 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4500 AssertRCReturn(rc, rc);
4501
4502 uint32_t u32Access = pSelReg->Attr.u;
4503 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4504 {
4505 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4506 u32Access = 0xf3;
4507 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4508 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4509 }
4510 else
4511 {
4512 /*
4513 * The way to differentiate between a real null selector and a selector that was merely loaded with 0 in
4514 * real-mode is by using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable
4515 * in protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
4516 * null selectors loaded in protected-mode have their attributes set to 0.
4517 */
4518 if (!u32Access)
4519 u32Access = X86DESCATTR_UNUSABLE;
4520 }
4521
4522 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4523 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4524 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
4525
4526 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4527 AssertRCReturn(rc, rc);
4528 return rc;
4529}
4530
4531
4532/**
4533 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4534 * into the guest-state area in the VMCS.
4535 *
4536 * @returns VBox status code.
4537 * @param pVCpu Pointer to the VMCPU.
4539 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4540 * out-of-sync. Make sure to update the required fields
4541 * before using them.
4542 *
4543 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
4544 * @remarks No-long-jump zone!!!
4545 */
4546static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4547{
4548 int rc = VERR_INTERNAL_ERROR_5;
4549 PVM pVM = pVCpu->CTX_SUFF(pVM);
4550
4551 /*
4552 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4553 */
4554 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4555 {
4556 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
4557 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4558 {
4559 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4560 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4561 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4562 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4563 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4564 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4565 }
4566
4567#ifdef VBOX_WITH_REM
4568 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4569 {
4570 Assert(pVM->hm.s.vmx.pRealModeTSS);
4571 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4572 if ( pVCpu->hm.s.vmx.fWasInRealMode
4573 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4574 {
4575 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4576 in real-mode (e.g. OpenBSD 4.0) */
4577 REMFlushTBs(pVM);
4578 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
4579 pVCpu->hm.s.vmx.fWasInRealMode = false;
4580 }
4581 }
4582#endif
4583 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4584 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
4585 AssertRCReturn(rc, rc);
4586 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4587 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
4588 AssertRCReturn(rc, rc);
4589 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4590 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
4591 AssertRCReturn(rc, rc);
4592 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4593 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
4594 AssertRCReturn(rc, rc);
4595 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4596 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
4597 AssertRCReturn(rc, rc);
4598 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4599 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
4600 AssertRCReturn(rc, rc);
4601
4602#ifdef VBOX_STRICT
4603 /* Validate. */
4604 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4605#endif
4606
4607 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4608 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
4609 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4610 }
4611
4612 /*
4613 * Guest TR.
4614 */
4615 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4616 {
4617 /*
4618 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
4619 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
4620 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4621 */
4622 uint16_t u16Sel = 0;
4623 uint32_t u32Limit = 0;
4624 uint64_t u64Base = 0;
4625 uint32_t u32AccessRights = 0;
4626
4627 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4628 {
4629 u16Sel = pMixedCtx->tr.Sel;
4630 u32Limit = pMixedCtx->tr.u32Limit;
4631 u64Base = pMixedCtx->tr.u64Base;
4632 u32AccessRights = pMixedCtx->tr.Attr.u;
4633 }
4634 else
4635 {
4636 Assert(pVM->hm.s.vmx.pRealModeTSS);
4637 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4638
4639 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4640 RTGCPHYS GCPhys;
4641 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4642 AssertRCReturn(rc, rc);
4643
4644 X86DESCATTR DescAttr;
4645 DescAttr.u = 0;
4646 DescAttr.n.u1Present = 1;
4647 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4648
4649 u16Sel = 0;
4650 u32Limit = HM_VTX_TSS_SIZE;
4651 u64Base = GCPhys; /* in real-mode phys = virt. */
4652 u32AccessRights = DescAttr.u;
4653 }
4654
4655 /* Validate. */
4656 Assert(!(u16Sel & RT_BIT(2)));
4657 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4658 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4659 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4660 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4661 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4662 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4663 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4664 Assert( (u32Limit & 0xfff) == 0xfff
4665 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4666 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4667 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4668
4669 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
4670 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
4671 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
4672 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
4673
4674 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4675 Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
4676 }
4677
4678 /*
4679 * Guest GDTR.
4680 */
4681 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4682 {
4683 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
4684 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
4685
4686 /* Validate. */
4687 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4688
4689 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4690 Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
4691 }
4692
4693 /*
4694 * Guest LDTR.
4695 */
4696 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4697 {
4698 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
4699 uint32_t u32Access = 0;
4700 if (!pMixedCtx->ldtr.Attr.u)
4701 u32Access = X86DESCATTR_UNUSABLE;
4702 else
4703 u32Access = pMixedCtx->ldtr.Attr.u;
4704
4705 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
4706 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
4707 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
4708 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
4709
4710 /* Validate. */
4711 if (!(u32Access & X86DESCATTR_UNUSABLE))
4712 {
4713 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4714 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4715 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4716 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4717 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4718 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4719 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4720 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4721 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4722 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4723 }
4724
4725 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4726 Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
4727 }
4728
4729 /*
4730 * Guest IDTR.
4731 */
4732 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4733 {
4734 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
4735 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
4736
4737 /* Validate. */
4738 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4739
4740 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4741 Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
4742 }
4743
4744 return VINF_SUCCESS;
4745}
4746
4747
4748/**
4749 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4750 * areas.
4751 *
4752 * These MSRs will automatically be loaded to the host CPU on every successful
4753 * VM-entry and stored from the host CPU on every successful VM-exit. This also
4754 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
4755 * -not- updated here for performance reasons. See hmR0VmxSaveHostMsrs().
4756 *
4757 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
4758 *
4759 * @returns VBox status code.
4760 * @param pVCpu Pointer to the VMCPU.
4761 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4762 * out-of-sync. Make sure to update the required fields
4763 * before using them.
4764 *
4765 * @remarks No-long-jump zone!!!
4766 */
4767static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4768{
4769 AssertPtr(pVCpu);
4770 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4771
4772 /*
4773 * MSRs for which we use the auto-load/store MSR area in the VMCS.
4774 */
4775 PVM pVM = pVCpu->CTX_SUFF(pVM);
4776 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4777 {
4778 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
4779#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4780 if (pVM->hm.s.fAllow64BitGuests)
4781 {
4782 int rc = VINF_SUCCESS;
4783 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL);
4784 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL);
4785 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false, NULL);
4786 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
4787 AssertRCReturn(rc, rc);
4788# ifdef DEBUG
4789 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4790 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4791 {
4792 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
4793 pMsr->u64Value));
4794 }
4795# endif
4796 }
4797#endif
4798 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4799 }
4800
4801 /*
4802 * Guest Sysenter MSRs.
4803 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4804 * VM-exits on WRMSRs for these MSRs.
4805 */
4806 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4807 {
4808 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4809 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4810 }
4811
4812 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4813 {
4814 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4815 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4816 }
4817
4818 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4819 {
4820 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4821 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4822 }
4823
4824 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4825 {
4826 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4827 {
4828 /*
4829 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4830 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4831 */
4832 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4833 {
4834 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4835 AssertRCReturn(rc, rc);
4836 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
4837 }
4838 else
4839 {
4840 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */,
4841 NULL /* pfAddedAndUpdated */);
4842 AssertRCReturn(rc, rc);
4843
4844 /* We need to intercept reads too, see @bugref{7386} comment #16. */
4845 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
4846 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4847 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
4848 pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
4849 }
4850 }
4851 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4852 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4853 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4854 }
4855
4856 return VINF_SUCCESS;
4857}
4858
4859
4860/**
4861 * Loads the guest activity state into the guest-state area in the VMCS.
4862 *
4863 * @returns VBox status code.
4864 * @param pVCpu Pointer to the VMCPU.
4865 * @param pCtx Pointer to the guest-CPU context. The data may be
4866 * out-of-sync. Make sure to update the required fields
4867 * before using them.
4868 *
4869 * @remarks No-long-jump zone!!!
4870 */
4871static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4872{
4873 NOREF(pCtx);
4874 /** @todo See if we can make use of other states, e.g.
4875 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4876 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4877 {
4878 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4879 AssertRCReturn(rc, rc);
4880
4881 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4882 }
4883 return VINF_SUCCESS;
4884}
4885
4886
4887/**
4888 * Sets up the appropriate function to run guest code.
4889 *
4890 * @returns VBox status code.
4891 * @param pVCpu Pointer to the VMCPU.
4892 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4893 * out-of-sync. Make sure to update the required fields
4894 * before using them.
4895 *
4896 * @remarks No-long-jump zone!!!
4897 */
4898static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4899{
4900 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4901 {
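/* The guest is in long mode: use the 64-bit VM-run handler (on 32-bit hosts this means the 32->64 switcher). */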
4902#ifndef VBOX_ENABLE_64_BITS_GUESTS
4903 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4904#endif
4905 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4906#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4907 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4908 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4909 {
4910 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4911 {
4912 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4913 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4914 | HM_CHANGED_VMX_ENTRY_CTLS
4915 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4916 }
4917 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4918 }
4919#else
4920 /* 64-bit host or hybrid host. */
4921 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4922#endif
4923 }
4924 else
4925 {
4926 /* Guest is not in long mode, use the 32-bit handler. */
4927#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4928 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4929 {
4930 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4931 {
4932 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4933 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4934 | HM_CHANGED_VMX_ENTRY_CTLS
4935 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4936 }
4937 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4938 }
4939#else
4940 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4941#endif
4942 }
4943 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4944 return VINF_SUCCESS;
4945}
4946
4947
4948/**
4949 * Wrapper for running the guest code in VT-x.
4950 *
4951 * @returns VBox strict status code.
4952 * @param pVM Pointer to the VM.
4953 * @param pVCpu Pointer to the VMCPU.
4954 * @param pCtx Pointer to the guest-CPU context.
4955 *
4956 * @remarks No-long-jump zone!!!
4957 */
4958DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4959{
4960 /*
4961 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4962 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4963 * Refer to the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
4964 */
4965 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
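/* A VMCS whose launch state is already "launched" must be entered with VMRESUME; otherwise VMLAUNCH is required. */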
4966 /** @todo Add stats for resume vs launch. */
4967#ifdef VBOX_WITH_KERNEL_USING_XMM
4968 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4969#else
4970 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4971#endif
4972}
4973
4974
4975/**
4976 * Reports world-switch error and dumps some useful debug info.
4977 *
4978 * @param pVM Pointer to the VM.
4979 * @param pVCpu Pointer to the VMCPU.
4980 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4981 * @param pCtx Pointer to the guest-CPU context.
4982 * @param pVmxTransient Pointer to the VMX transient structure (only
4983 * exitReason updated).
4984 */
4985static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4986{
4987 Assert(pVM);
4988 Assert(pVCpu);
4989 Assert(pCtx);
4990 Assert(pVmxTransient);
4991 HMVMX_ASSERT_PREEMPT_SAFE();
4992
4993 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4994 switch (rcVMRun)
4995 {
4996 case VERR_VMX_INVALID_VMXON_PTR:
4997 AssertFailed();
4998 break;
4999 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
5000 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
5001 {
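/* Pull the exit reason, VM-instruction error and exit qualification from the current VMCS for diagnostics. */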
5002 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
5003 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
5004 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
5005 AssertRC(rc);
5006
5007 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5008 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
5009 Cannot do it here as we may have been long preempted. */
5010
5011#ifdef VBOX_STRICT
5012 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
5013 pVmxTransient->uExitReason));
5014 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
5015 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
5016 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
5017 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
5018 else
5019 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
5020 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
5021 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
5022
5023 /* VMX control bits. */
5024 uint32_t u32Val;
5025 uint64_t u64Val;
5026 HMVMXHCUINTREG uHCReg;
5027 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
5028 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
5029 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
5030 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
5031 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
5032 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
5033 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
5034 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
5035 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
5036 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
5037 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
5038 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
5039 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
5040 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
5041 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
5042 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
5043 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
5044 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
5045 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
5046 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
5047 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
5048 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
5049 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5050 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
5051 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5052 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
5053 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
5054 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
5055 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
5056 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
5057 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
5058 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
5059 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
5060 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
5061 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
5062 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5063 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
5064 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
5065 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
5066 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5067 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
5068 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
5069
5070 /* Guest bits. */
5071 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
5072 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
5073 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
5074 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
5075 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
5076 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
5077 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
5078 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
5079
5080 /* Host bits. */
5081 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
5082 Log4(("Host CR0 %#RHr\n", uHCReg));
5083 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
5084 Log4(("Host CR3 %#RHr\n", uHCReg));
5085 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
5086 Log4(("Host CR4 %#RHr\n", uHCReg));
5087
5088 RTGDTR HostGdtr;
5089 PCX86DESCHC pDesc;
5090 ASMGetGDTR(&HostGdtr);
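/* Dump each host selector and, when it lies within the GDT limit, its descriptor as well. */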
5091 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
5092 Log4(("Host CS %#08x\n", u32Val));
5093 if (u32Val < HostGdtr.cbGdt)
5094 {
5095 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5096 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
5097 }
5098
5099 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
5100 Log4(("Host DS %#08x\n", u32Val));
5101 if (u32Val < HostGdtr.cbGdt)
5102 {
5103 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5104 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
5105 }
5106
5107 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
5108 Log4(("Host ES %#08x\n", u32Val));
5109 if (u32Val < HostGdtr.cbGdt)
5110 {
5111 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5112 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
5113 }
5114
5115 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
5116 Log4(("Host FS %#08x\n", u32Val));
5117 if (u32Val < HostGdtr.cbGdt)
5118 {
5119 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5120 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
5121 }
5122
5123 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
5124 Log4(("Host GS %#08x\n", u32Val));
5125 if (u32Val < HostGdtr.cbGdt)
5126 {
5127 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5128 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
5129 }
5130
5131 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
5132 Log4(("Host SS %#08x\n", u32Val));
5133 if (u32Val < HostGdtr.cbGdt)
5134 {
5135 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5136 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
5137 }
5138
5139 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
5140 Log4(("Host TR %#08x\n", u32Val));
5141 if (u32Val < HostGdtr.cbGdt)
5142 {
5143 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5144 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
5145 }
5146
5147 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5148 Log4(("Host TR Base %#RHv\n", uHCReg));
5149 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5150 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5151 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5152 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5153 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5154 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5155 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5156 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5157 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5158 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5159 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5160 Log4(("Host RSP %#RHv\n", uHCReg));
5161 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5162 Log4(("Host RIP %#RHv\n", uHCReg));
5163# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5164 if (HMVMX_IS_64BIT_HOST_MODE())
5165 {
5166 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5167 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5168 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5169 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5170 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5171 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5172 }
5173# endif
5174#endif /* VBOX_STRICT */
5175 break;
5176 }
5177
5178 default:
5179 /* Impossible */
5180 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5181 break;
5182 }
5183 NOREF(pVM); NOREF(pCtx);
5184}
5185
5186
5187#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5188#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5189# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5190#endif
5191#ifdef VBOX_STRICT
5192static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5193{
5194 switch (idxField)
5195 {
5196 case VMX_VMCS_GUEST_RIP:
5197 case VMX_VMCS_GUEST_RSP:
5198 case VMX_VMCS_GUEST_SYSENTER_EIP:
5199 case VMX_VMCS_GUEST_SYSENTER_ESP:
5200 case VMX_VMCS_GUEST_GDTR_BASE:
5201 case VMX_VMCS_GUEST_IDTR_BASE:
5202 case VMX_VMCS_GUEST_CS_BASE:
5203 case VMX_VMCS_GUEST_DS_BASE:
5204 case VMX_VMCS_GUEST_ES_BASE:
5205 case VMX_VMCS_GUEST_FS_BASE:
5206 case VMX_VMCS_GUEST_GS_BASE:
5207 case VMX_VMCS_GUEST_SS_BASE:
5208 case VMX_VMCS_GUEST_LDTR_BASE:
5209 case VMX_VMCS_GUEST_TR_BASE:
5210 case VMX_VMCS_GUEST_CR3:
5211 return true;
5212 }
5213 return false;
5214}
5215
5216static bool hmR0VmxIsValidReadField(uint32_t idxField)
5217{
5218 switch (idxField)
5219 {
5220 /* Read-only fields. */
5221 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5222 return true;
5223 }
5224 /* Remaining readable fields should also be writable. */
5225 return hmR0VmxIsValidWriteField(idxField);
5226}
5227#endif /* VBOX_STRICT */
5228
5229
5230/**
5231 * Executes the specified handler in 64-bit mode.
5232 *
5233 * @returns VBox status code.
5234 * @param pVM Pointer to the VM.
5235 * @param pVCpu Pointer to the VMCPU.
5236 * @param pCtx Pointer to the guest CPU context.
5237 * @param enmOp The operation to perform.
5238 * @param cParams Number of parameters.
5239 * @param paParam Array of 32-bit parameters.
5240 */
5241VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
5242 uint32_t cParams, uint32_t *paParam)
5243{
5244 int rc, rc2;
5245 PHMGLOBALCPUINFO pCpu;
5246 RTHCPHYS HCPhysCpuPage;
5247 RTCCUINTREG fOldEFlags;
5248
5249 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5250 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5251 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5252 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5253
5254#ifdef VBOX_STRICT
5255 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5256 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5257
5258 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5259 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5260#endif
5261
5262 /* Disable interrupts. */
5263 fOldEFlags = ASMIntDisableFlags();
5264
5265#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5266 RTCPUID idHostCpu = RTMpCpuId();
5267 CPUMR0SetLApic(pVCpu, idHostCpu);
5268#endif
5269
5270 pCpu = HMR0GetCurrentCpu();
5271 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5272
5273 /* Clear the VMCS: this marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
5274 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5275
5276 /* Leave VMX Root Mode. */
5277 VMXDisable();
5278
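/* Clear CR4.VMXE on this CPU now that we have left VMX root mode. */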
5279 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5280
5281 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5282 CPUMSetHyperEIP(pVCpu, enmOp);
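/* Push the parameters onto the hypervisor stack in reverse order so that paParam[0] ends up on top. */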
5283 for (int i = (int)cParams - 1; i >= 0; i--)
5284 CPUMPushHyper(pVCpu, paParam[i]);
5285
5286 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5287
5288 /* Call the switcher. */
5289 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5290 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5291
5292 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5293 /* Make sure the VMX instructions don't cause #UD faults. */
5294 SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
5295
5296 /* Re-enter VMX Root Mode */
5297 rc2 = VMXEnable(HCPhysCpuPage);
5298 if (RT_FAILURE(rc2))
5299 {
5300 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5301 ASMSetFlags(fOldEFlags);
5302 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
5303 return rc2;
5304 }
5305
5306 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5307 AssertRC(rc2);
5308 Assert(!(ASMGetFlags() & X86_EFL_IF));
5309 ASMSetFlags(fOldEFlags);
5310 return rc;
5311}
5312
5313
5314/**
5315 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5316 * supporting 64-bit guests.
5317 *
5318 * @returns VBox status code.
5319 * @param fResume Whether to VMLAUNCH or VMRESUME.
5320 * @param pCtx Pointer to the guest-CPU context.
5321 * @param pCache Pointer to the VMCS cache.
5322 * @param pVM Pointer to the VM.
5323 * @param pVCpu Pointer to the VMCPU.
5324 */
5325DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5326{
5327 PHMGLOBALCPUINFO pCpu = NULL;
5328 RTHCPHYS HCPhysCpuPage = 0;
5329 int rc = VERR_INTERNAL_ERROR_5;
5330
5331 pCpu = HMR0GetCurrentCpu();
5332 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5333
5334#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5335 pCache->uPos = 1;
5336 pCache->interPD = PGMGetInterPaeCR3(pVM);
5337 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5338#endif
5339
5340#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5341 pCache->TestIn.HCPhysCpuPage = 0;
5342 pCache->TestIn.HCPhysVmcs = 0;
5343 pCache->TestIn.pCache = 0;
5344 pCache->TestOut.HCPhysVmcs = 0;
5345 pCache->TestOut.pCache = 0;
5346 pCache->TestOut.pCtx = 0;
5347 pCache->TestOut.eflags = 0;
5348#endif
5349
5350 uint32_t aParam[10];
5351 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5352 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5353 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5354 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
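/* Params 3-5: raw-mode context addresses of the VMCS cache, the VM and the VMCPU structures; the zero entries
serve as the high dwords, mirroring the lo/hi layout of params 1 and 2. */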
5355 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5356 aParam[5] = 0;
5357 aParam[6] = VM_RC_ADDR(pVM, pVM);
5358 aParam[7] = 0;
5359 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5360 aParam[9] = 0;
5361
5362#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5363 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5364 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5365#endif
5366 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5367
5368#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5369 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5370 Assert(pCtx->dr[4] == 10);
5371 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5372#endif
5373
5374#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5375 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5376 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5377 pVCpu->hm.s.vmx.HCPhysVmcs));
5378 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5379 pCache->TestOut.HCPhysVmcs));
5380 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5381 pCache->TestOut.pCache));
5382 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5383 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5384 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5385 pCache->TestOut.pCtx));
5386 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5387#endif
5388 return rc;
5389}
5390
5391
5392/**
5393 * Initializes the VMCS read-cache.
5394 *
5395 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
5396 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
5397 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
5398 * (those that have a 32-bit FULL & HIGH part).
5399 *
5400 * @returns VBox status code.
5401 * @param pVM Pointer to the VM.
5402 * @param pVCpu Pointer to the VMCPU.
5403 */
5404static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5405{
5406#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5407{ \
5408 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5409 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5410 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5411 ++cReadFields; \
5412}
5413
5414 AssertPtr(pVM);
5415 AssertPtr(pVCpu);
5416 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5417 uint32_t cReadFields = 0;
5418
5419 /*
5420 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5421 * and serve to indicate exceptions to the rules.
5422 */
5423
5424 /* Guest-natural selector base fields. */
5425#if 0
5426 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5427 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5428 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5429#endif
5430 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5431 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5432 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5433 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5434 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5435 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5436 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5437 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5438 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5439 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5440 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5441 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5442#if 0
5443 /* Unused natural width guest-state fields. */
5444 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5445 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5446#endif
5447 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5448 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5449
5450 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
5451#if 0
5452 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5453 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5454 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5455 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5456 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5457 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5458 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5459 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5460 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5461#endif
5462
5463 /* Natural width guest-state fields. */
5464 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5465#if 0
5466 /* Currently unused field. */
5467 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5468#endif
5469
5470 if (pVM->hm.s.fNestedPaging)
5471 {
5472 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5473 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5474 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5475 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5476 }
5477 else
5478 {
5479 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5480 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5481 }
5482
5483#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5484 return VINF_SUCCESS;
5485}
5486
5487
5488/**
5489 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5490 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5491 * Darwin, running 64-bit guests).
5492 *
5493 * @returns VBox status code.
5494 * @param pVCpu Pointer to the VMCPU.
5495 * @param idxField The VMCS field encoding.
5496 * @param u64Val 16, 32 or 64-bit value.
5497 */
5498VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5499{
5500 int rc;
5501 switch (idxField)
5502 {
5503 /*
5504 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5505 */
5506 /* 64-bit Control fields. */
5507 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5508 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5509 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5510 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5511 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5512 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5513 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5514 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5515 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5516 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5517 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5518 case VMX_VMCS64_CTRL_EPTP_FULL:
5519 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5520 /* 64-bit Guest-state fields. */
5521 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5522 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5523 case VMX_VMCS64_GUEST_PAT_FULL:
5524 case VMX_VMCS64_GUEST_EFER_FULL:
5525 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5526 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5527 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5528 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5529 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5530 /* 64-bit Host-state fields. */
5531 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
5532 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
5533 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5534 {
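/* The "HIGH" half of one of these 64-bit fields uses the encoding of its "FULL" half + 1,
so two 32-bit VMWRITEs cover the entire field. */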
5535 rc = VMXWriteVmcs32(idxField, u64Val);
5536 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
5537 break;
5538 }
5539
5540 /*
5541 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5542 * values). The queued VMWRITEs are executed later, when the host is switched to 64-bit mode to run the 64-bit guest.
5543 */
5544 /* Natural-width Guest-state fields. */
5545 case VMX_VMCS_GUEST_CR3:
5546 case VMX_VMCS_GUEST_ES_BASE:
5547 case VMX_VMCS_GUEST_CS_BASE:
5548 case VMX_VMCS_GUEST_SS_BASE:
5549 case VMX_VMCS_GUEST_DS_BASE:
5550 case VMX_VMCS_GUEST_FS_BASE:
5551 case VMX_VMCS_GUEST_GS_BASE:
5552 case VMX_VMCS_GUEST_LDTR_BASE:
5553 case VMX_VMCS_GUEST_TR_BASE:
5554 case VMX_VMCS_GUEST_GDTR_BASE:
5555 case VMX_VMCS_GUEST_IDTR_BASE:
5556 case VMX_VMCS_GUEST_RSP:
5557 case VMX_VMCS_GUEST_RIP:
5558 case VMX_VMCS_GUEST_SYSENTER_ESP:
5559 case VMX_VMCS_GUEST_SYSENTER_EIP:
5560 {
5561 if (!(u64Val >> 32))
5562 {
5563 /* If this field is 64-bit, VT-x will zero out the top bits. */
5564 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5565 }
5566 else
5567 {
5568 /* Assert that only the 32->64 switcher case should ever come here. */
5569 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5570 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5571 }
5572 break;
5573 }
5574
5575 default:
5576 {
5577 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5578 rc = VERR_INVALID_PARAMETER;
5579 break;
5580 }
5581 }
5582 AssertRCReturn(rc, rc);
5583 return rc;
5584}
5585
5586
5587/**
5588 * Queues up a VMWRITE by using the VMCS write cache.
5589 * This is only used on 32-bit hosts (except Darwin) for 64-bit guests.
5590 *
 * @returns VBox status code.
5591 * @param pVCpu Pointer to the VMCPU.
5592 * @param idxField The VMCS field encoding.
5593 * @param u64Val 16, 32 or 64-bit value.
5594 */
5595VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5596{
5597 AssertPtr(pVCpu);
5598 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5599
5600 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5601 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5602
5603 /* Make sure there are no duplicates. */
5604 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5605 {
5606 if (pCache->Write.aField[i] == idxField)
5607 {
5608 pCache->Write.aFieldVal[i] = u64Val;
5609 return VINF_SUCCESS;
5610 }
5611 }
5612
5613 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5614 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5615 pCache->Write.cValidEntries++;
5616 return VINF_SUCCESS;
5617}
5618
5619/* Enable later when the assembly code uses these as callbacks. */
5620#if 0
5621/**
5622 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
5623 *
5624 * @param pVCpu Pointer to the VMCPU.
5625 * @param pCache Pointer to the VMCS cache.
5626 *
5627 * @remarks No-long-jump zone!!!
5628 */
5629VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
5630{
5631 AssertPtr(pCache);
5632 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5633 {
5634 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
5635 AssertRC(rc);
5636 }
5637 pCache->Write.cValidEntries = 0;
5638}
5639
5640
5641/**
5642 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
5643 *
5644 * @param pVCpu Pointer to the VMCPU.
5645 * @param pCache Pointer to the VMCS cache.
5646 *
5647 * @remarks No-long-jump zone!!!
5648 */
5649VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
5650{
5651 AssertPtr(pCache);
5652 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
5653 {
5654 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
5655 AssertRC(rc);
5656 }
5657}
5658#endif
5659#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
5660
5661
5662/**
5663 * Sets up the usage of TSC-offsetting and updates the VMCS.
5664 *
5665 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
5666 * VMX preemption timer.
5667 *
5669 * @param pVM Pointer to the cross context VM structure.
5670 * @param pVCpu Pointer to the VMCPU.
5671 *
5672 * @remarks No-long-jump zone!!!
5673 */
5674static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
5675{
5676 int rc;
5677 bool fOffsettedTsc;
5678 bool fParavirtTsc;
5679 if (pVM->hm.s.vmx.fUsePreemptTimer)
5680 {
5681 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
5682 &fOffsettedTsc, &fParavirtTsc);
5683
5684 /* Make sure the returned values have sane upper and lower boundaries. */
5685 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5686 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5687 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
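/* The VMX-preemption timer counts down at a rate of TSC >> cPreemptTimerShift (reported by the IA32_VMX_MISC MSR),
so convert the TSC-tick deadline into preemption-timer ticks. */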
5688 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5689
5690 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5691 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
5692 }
5693 else
5694 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5695
5696 /** @todo later optimize this to be done elsewhere and not before every
5697 * VM-entry. */
5698 if (fParavirtTsc)
5699 {
5700 /* Currently neither Hyper-V nor KVM needs to update its paravirtualized TSC
5701 information before every VM-entry, hence we disable this for performance's sake. */
5702#if 0
5703 rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5704 AssertRC(rc);
5705#endif
5706 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5707 }
5708
5709 if (fOffsettedTsc)
5710 {
5711 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5712 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5713
5714 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5715 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5716 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5717 }
5718 else
5719 {
5720 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5721 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5722 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5723 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5724 }
5725}
5726
5727
5728/**
5729 * Determines if an exception is a contributory exception.
5730 *
5731 * Contributory exceptions are ones which can cause double-faults unless the
5732 * original exception was a benign exception. Page-fault is intentionally not
5733 * included here as it's a conditional contributory exception.
5734 *
5735 * @returns true if the exception is contributory, false otherwise.
5736 * @param uVector The exception vector.
5737 */
5738DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
5739{
5740 switch (uVector)
5741 {
5742 case X86_XCPT_GP:
5743 case X86_XCPT_SS:
5744 case X86_XCPT_NP:
5745 case X86_XCPT_TS:
5746 case X86_XCPT_DE:
5747 return true;
5748 default:
5749 break;
5750 }
5751 return false;
5752}
5753
5754
5755/**
5756 * Sets an event as a pending event to be injected into the guest.
5757 *
5758 * @param pVCpu Pointer to the VMCPU.
5759 * @param u32IntInfo The VM-entry interruption-information field.
5760 * @param cbInstr The VM-entry instruction length in bytes (for software
5761 * interrupts, exceptions and privileged software
5762 * exceptions).
5763 * @param u32ErrCode The VM-entry exception error code.
5764 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5765 * page-fault.
5766 *
5767 * @remarks Statistics counter assumes this is a guest event being injected or
5768 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5769 * always incremented.
5770 */
5771DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5772 RTGCUINTPTR GCPtrFaultAddress)
5773{
5774 Assert(!pVCpu->hm.s.Event.fPending);
5775 pVCpu->hm.s.Event.fPending = true;
5776 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5777 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5778 pVCpu->hm.s.Event.cbInstr = cbInstr;
5779 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5780
5781 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5782}
5783
5784
5785/**
5786 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
5787 *
5788 * @param pVCpu Pointer to the VMCPU.
5789 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5790 * out-of-sync. Make sure to update the required fields
5791 * before using them.
5792 */
5793DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5794{
5795 NOREF(pMixedCtx);
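/* Build the VM-entry interruption information: vector = #DF, type = hardware exception, error code valid
(the #DF error code is always 0). */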
5796 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5797 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5798 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5799 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5800}
5801
5802
5803/**
5804 * Handle a condition that occurred while delivering an event through the guest
5805 * IDT.
5806 *
5807 * @returns VBox status code (informational error codes included).
5808 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5809 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
5810 * continue execution of the guest, which will deliver the #DF.
5811 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5812 *
5813 * @param pVCpu Pointer to the VMCPU.
5814 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5815 * out-of-sync. Make sure to update the required fields
5816 * before using them.
5817 * @param pVmxTransient Pointer to the VMX transient structure.
5818 *
5819 * @remarks No-long-jump zone!!!
5820 */
5821static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5822{
5823 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5824
5825 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5826 AssertRCReturn(rc, rc);
5827 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5828 AssertRCReturn(rc, rc);
5829
5830 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5831 {
5832 uint32_t uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5833 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5834
5835 typedef enum
5836 {
5837 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5838 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5839 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5840 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
5841 } VMXREFLECTXCPT;
5842
5843 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
5844 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
5845 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5846 {
5847 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
5848 {
5849 enmReflect = VMXREFLECTXCPT_XCPT;
5850#ifdef VBOX_STRICT
5851 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
5852 && uExitVector == X86_XCPT_PF)
5853 {
5854 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5855 }
5856#endif
5857 if ( uExitVector == X86_XCPT_PF
5858 && uIdtVector == X86_XCPT_PF)
5859 {
5860 pVmxTransient->fVectoringDoublePF = true;
5861 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5862 }
5863 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5864 && hmR0VmxIsContributoryXcpt(uExitVector)
5865 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
5866 || uIdtVector == X86_XCPT_PF))
5867 {
5868 enmReflect = VMXREFLECTXCPT_DF;
5869 }
5870 else if (uIdtVector == X86_XCPT_DF)
5871 enmReflect = VMXREFLECTXCPT_TF;
5872 }
5873 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5874 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5875 {
5876 /*
5877 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
5878 * privileged software exceptions (#DB from ICEBP) as they reoccur when restarting the instruction.
5879 */
5880 enmReflect = VMXREFLECTXCPT_XCPT;
5881
5882 if (uExitVector == X86_XCPT_PF)
5883 {
5884 pVmxTransient->fVectoringPF = true;
5885 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5886 }
5887 }
5888 }
5889 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5890 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5891 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5892 {
5893 /*
5894 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5895 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,
5896 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.
5897 */
5898 enmReflect = VMXREFLECTXCPT_XCPT;
5899 }
5900
5901 /*
5902 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred
5903 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before
5904 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.
5905 *
5906 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5907 */
5908 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5909 && enmReflect == VMXREFLECTXCPT_XCPT
5910 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
5911 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5912 {
5913 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5914 }
5915
5916 switch (enmReflect)
5917 {
5918 case VMXREFLECTXCPT_XCPT:
5919 {
5920 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5921 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5922 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5923
5924 uint32_t u32ErrCode = 0;
5925 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5926 {
5927 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5928 AssertRCReturn(rc, rc);
5929 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5930 }
5931
5932 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5933 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5934 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5935 rc = VINF_SUCCESS;
5936 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5937 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5938
5939 break;
5940 }
5941
5942 case VMXREFLECTXCPT_DF:
5943 {
5944 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5945 rc = VINF_HM_DOUBLE_FAULT;
5946 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5947 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5948
5949 break;
5950 }
5951
5952 case VMXREFLECTXCPT_TF:
5953 {
5954 rc = VINF_EM_RESET;
5955 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5956 uExitVector));
5957 break;
5958 }
5959
5960 default:
5961 Assert(rc == VINF_SUCCESS);
5962 break;
5963 }
5964 }
5965 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
5966 && VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
5967 && uExitVector != X86_XCPT_DF
5968 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5969 {
5970 /*
5971 * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
5972 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
5973 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
5974 */
5975 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5976 {
5977 Log4(("hmR0VmxCheckExitDueToEventDelivery: vcpu[%RU32] Setting VMCPU_FF_BLOCK_NMIS. Valid=%RTbool uExitReason=%u\n",
5978 pVCpu->idCpu, VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
5979 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5980 }
5981 }
5982
5983 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5984 return rc;
5985}
5986
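/*
 * Sketch of how a VM-exit handler would typically consume the function above
 * (assuming the usual pVCpu/pMixedCtx/pVmxTransient parameters are in scope):
 */
#if 0
    int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
    if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
        return VINF_SUCCESS;                /* A #DF is pending; re-enter the guest to deliver it. */
    if (RT_UNLIKELY(rc == VINF_EM_RESET))
        return rc;                          /* Triple-fault; let ring-3 reset the VM. */
    AssertRCReturn(rc, rc);                 /* VINF_SUCCESS: carry on handling this VM-exit. */
#endif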
5987
5988/**
5989 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5990 *
5991 * @returns VBox status code.
5992 * @param pVCpu Pointer to the VMCPU.
5993 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5994 * out-of-sync. Make sure to update the required fields
5995 * before using them.
5996 *
5997 * @remarks No-long-jump zone!!!
5998 */
5999static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6000{
6001 NOREF(pMixedCtx);
6002
6003 /*
6004 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
6005 * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
6006 */
6007 VMMRZCallRing3Disable(pVCpu);
6008 HM_DISABLE_PREEMPT();
6009
6010 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
6011 {
6012 uint32_t uVal = 0;
6013 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
6014 AssertRCReturn(rc, rc);
6015
6016 uint32_t uShadow = 0;
6017 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
6018 AssertRCReturn(rc, rc);
6019
6020 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
6021 CPUMSetGuestCR0(pVCpu, uVal);
6022 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
6023 }
6024
6025 HM_RESTORE_PREEMPT();
6026 VMMRZCallRing3Enable(pVCpu);
6027 return VINF_SUCCESS;
6028}
6029
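/*
 * The combining rule above implements the CR0 guest/host mask and read shadow: for
 * masked (host-owned) bits the guest sees the shadow value, for the rest it sees the
 * real VMCS value. A small worked example with a hypothetical mask:
 */
#if 0
    uint32_t const fCr0Mask = X86_CR0_CD | X86_CR0_NW;                      /* hypothetical host-owned bits */
    uint32_t const uCr0Real = X86_CR0_PG | X86_CR0_PE | X86_CR0_CD;         /* VMX_VMCS_GUEST_CR0 */
    uint32_t const uCr0Shdw = X86_CR0_PG | X86_CR0_PE;                      /* VMX_VMCS_CTRL_CR0_READ_SHADOW */
    uint32_t const uCr0Gst  = (uCr0Shdw & fCr0Mask) | (uCr0Real & ~fCr0Mask);
    Assert(uCr0Gst == (X86_CR0_PG | X86_CR0_PE));                           /* the guest never sees the host's CD. */
#endif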
6030
6031/**
6032 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
6033 *
6034 * @returns VBox status code.
6035 * @param pVCpu Pointer to the VMCPU.
6036 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6037 * out-of-sync. Make sure to update the required fields
6038 * before using them.
6039 *
6040 * @remarks No-long-jump zone!!!
6041 */
6042static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6043{
6044 NOREF(pMixedCtx);
6045
6046 int rc = VINF_SUCCESS;
6047 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
6048 {
6049 uint32_t uVal = 0;
6050 uint32_t uShadow = 0;
6051 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
6052 AssertRCReturn(rc, rc);
6053 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
6054 AssertRCReturn(rc, rc);
6055
6056 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
6057 CPUMSetGuestCR4(pVCpu, uVal);
6058 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
6059 }
6060 return rc;
6061}
6062
6063
6064/**
6065 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
6066 *
6067 * @returns VBox status code.
6068 * @param pVCpu Pointer to the VMCPU.
6069 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6070 * out-of-sync. Make sure to update the required fields
6071 * before using them.
6072 *
6073 * @remarks No-long-jump zone!!!
6074 */
6075static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6076{
6077 int rc = VINF_SUCCESS;
6078 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
6079 {
6080 uint64_t u64Val = 0;
6081 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
6082 AssertRCReturn(rc, rc);
6083
6084 pMixedCtx->rip = u64Val;
6085 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
6086 }
6087 return rc;
6088}
6089
6090
6091/**
6092 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
6093 *
6094 * @returns VBox status code.
6095 * @param pVCpu Pointer to the VMCPU.
6096 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6097 * out-of-sync. Make sure to update the required fields
6098 * before using them.
6099 *
6100 * @remarks No-long-jump zone!!!
6101 */
6102static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6103{
6104 int rc = VINF_SUCCESS;
6105 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
6106 {
6107 uint64_t u64Val = 0;
6108 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
6109 AssertRCReturn(rc, rc);
6110
6111 pMixedCtx->rsp = u64Val;
6112 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
6113 }
6114 return rc;
6115}
6116
6117
6118/**
6119 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
6120 *
6121 * @returns VBox status code.
6122 * @param pVCpu Pointer to the VMCPU.
6123 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6124 * out-of-sync. Make sure to update the required fields
6125 * before using them.
6126 *
6127 * @remarks No-long-jump zone!!!
6128 */
6129static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6130{
6131 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
6132 {
6133 uint32_t uVal = 0;
6134 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
6135 AssertRCReturn(rc, rc);
6136
6137 pMixedCtx->eflags.u32 = uVal;
6138 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
6139 {
6140 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
6141 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
6142
6143 pMixedCtx->eflags.Bits.u1VM = 0;
6144 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6145 }
6146
6147 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
6148 }
6149 return VINF_SUCCESS;
6150}
6151
6152
6153/**
6154 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
6155 * guest-CPU context.
6156 */
6157DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6158{
6159 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6160 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6161 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6162 return rc;
6163}
6164
6165
6166/**
6167 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
6168 * from the guest-state area in the VMCS.
6169 *
6170 * @param pVCpu Pointer to the VMCPU.
6171 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6172 * out-of-sync. Make sure to update the required fields
6173 * before using them.
6174 *
6175 * @remarks No-long-jump zone!!!
6176 */
6177static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6178{
6179 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
6180 {
6181 uint32_t uIntrState = 0;
6182 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
6183 AssertRC(rc);
6184
6185 if (!uIntrState)
6186 {
6187 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6188 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6189
6190 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6191 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6192 }
6193 else
6194 {
6195 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
6196 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
6197 {
6198 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6199 AssertRC(rc);
6200 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
6201 AssertRC(rc);
6202
6203 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
6204 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6205 }
6206 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6207 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6208
6209 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6210 {
6211 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6212 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6213 }
6214 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6215 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6216 }
6217
6218 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
6219 }
6220}
6221
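/*
 * A condensed sketch of what the code above decodes from the interruptibility state
 * (bit 0 = blocking by STI, bit 1 = blocking by MOV SS, bit 3 = blocking by NMI, per
 * the Intel spec.):
 */
#if 0
    bool const fInhibitInt = RT_BOOL(uIntrState & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
                                                   | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
    bool const fBlockNmi   = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
    /* fInhibitInt -> EMSetInhibitInterruptsPC() / VMCPU_FF_INHIBIT_INTERRUPTS; fBlockNmi -> VMCPU_FF_BLOCK_NMIS. */
#endif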
6222
6223/**
6224 * Saves the guest's activity state.
6225 *
6226 * @returns VBox status code.
6227 * @param pVCpu Pointer to the VMCPU.
6228 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6229 * out-of-sync. Make sure to update the required fields
6230 * before using them.
6231 *
6232 * @remarks No-long-jump zone!!!
6233 */
6234static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6235{
6236 NOREF(pMixedCtx);
6237 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
6238 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6239 return VINF_SUCCESS;
6240}
6241
6242
6243/**
6244 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
6245 * the current VMCS into the guest-CPU context.
6246 *
6247 * @returns VBox status code.
6248 * @param pVCpu Pointer to the VMCPU.
6249 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6250 * out-of-sync. Make sure to update the required fields
6251 * before using them.
6252 *
6253 * @remarks No-long-jump zone!!!
6254 */
6255static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6256{
6257 int rc = VINF_SUCCESS;
6258 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6259 {
6260 uint32_t u32Val = 0;
6261 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
6262 pMixedCtx->SysEnter.cs = u32Val;
6263 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6264 }
6265
6266 uint64_t u64Val = 0;
6267 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6268 {
6269 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
6270 pMixedCtx->SysEnter.eip = u64Val;
6271 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6272 }
6273 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6274 {
6275 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
6276 pMixedCtx->SysEnter.esp = u64Val;
6277 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6278 }
6279 return rc;
6280}
6281
6282
6283/**
6284 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
6285 * the CPU back into the guest-CPU context.
6286 *
6287 * @returns VBox status code.
6288 * @param pVCpu Pointer to the VMCPU.
6289 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6290 * out-of-sync. Make sure to update the required fields
6291 * before using them.
6292 *
6293 * @remarks No-long-jump zone!!!
6294 */
6295static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6296{
6297#if HC_ARCH_BITS == 64
6298 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6299 {
6300 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
6301 VMMRZCallRing3Disable(pVCpu);
6302 HM_DISABLE_PREEMPT();
6303
6304 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
6305 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6306 {
6307 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6308 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6309 }
6310
6311 HM_RESTORE_PREEMPT();
6312 VMMRZCallRing3Enable(pVCpu);
6313 }
6314 else
6315 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6316#else
6317 NOREF(pMixedCtx);
6318 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6319#endif
6320
6321 return VINF_SUCCESS;
6322}
6323
6324
6325/**
6326 * Saves the auto load/store'd guest MSRs from the current VMCS into
6327 * the guest-CPU context.
6328 *
6329 * @returns VBox status code.
6330 * @param pVCpu Pointer to the VMCPU.
6331 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6332 * out-of-sync. Make sure to update the required fields
6333 * before using them.
6334 *
6335 * @remarks No-long-jump zone!!!
6336 */
6337static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6338{
6339 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6340 return VINF_SUCCESS;
6341
6342 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6343 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6344 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
6345 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6346 {
6347 switch (pMsr->u32Msr)
6348 {
6349 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6350 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
6351 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
6352 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
6353 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6354 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
6355 break;
6356
6357 default:
6358 {
6359 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
6360 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6361 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6362 }
6363 }
6364 }
6365
6366 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6367 return VINF_SUCCESS;
6368}
6369
6370
6371/**
6372 * Saves the guest control registers from the current VMCS into the guest-CPU
6373 * context.
6374 *
6375 * @returns VBox status code.
6376 * @param pVCpu Pointer to the VMCPU.
6377 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6378 * out-of-sync. Make sure to update the required fields
6379 * before using them.
6380 *
6381 * @remarks No-long-jump zone!!!
6382 */
6383static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6384{
6385 /* Guest CR0. Guest FPU. */
6386 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6387 AssertRCReturn(rc, rc);
6388
6389 /* Guest CR4. */
6390 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6391 AssertRCReturn(rc, rc);
6392
6393 /* Guest CR2 - always updated during the world-switch or in #PF. */
6394 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
6395 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6396 {
6397 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6398 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6399
6400 PVM pVM = pVCpu->CTX_SUFF(pVM);
6401 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6402 || ( pVM->hm.s.fNestedPaging
6403 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
6404 {
6405 uint64_t u64Val = 0;
6406 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6407 if (pMixedCtx->cr3 != u64Val)
6408 {
6409 CPUMSetGuestCR3(pVCpu, u64Val);
6410 if (VMMRZCallRing3IsEnabled(pVCpu))
6411 {
6412 PGMUpdateCR3(pVCpu, u64Val);
6413 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6414 }
6415 else
6416 {
6417 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
6418 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6419 }
6420 }
6421
6422 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
6423 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
6424 {
6425 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
6426 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
6427 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
6428 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
6429
6430 if (VMMRZCallRing3IsEnabled(pVCpu))
6431 {
6432 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6433 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6434 }
6435 else
6436 {
6437 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
6438 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6439 }
6440 }
6441 }
6442
6443 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6444 }
6445
6446 /*
6447 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6448 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6449 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
6450 *
6451 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
6452 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6453 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6454 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
6455 *
6456 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6457 */
6458 if (VMMRZCallRing3IsEnabled(pVCpu))
6459 {
6460 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6461 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6462
6463 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6464 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6465
6466 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6467 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6468 }
6469
6470 return rc;
6471}
6472
6473
6474/**
6475 * Reads a guest segment register from the current VMCS into the guest-CPU
6476 * context.
6477 *
6478 * @returns VBox status code.
6479 * @param pVCpu Pointer to the VMCPU.
6480 * @param idxSel Index of the selector in the VMCS.
6481 * @param idxLimit Index of the segment limit in the VMCS.
6482 * @param idxBase Index of the segment base in the VMCS.
6483 * @param idxAccess Index of the access rights of the segment in the VMCS.
6484 * @param pSelReg Pointer to the segment selector.
6485 *
6486 * @remarks No-long-jump zone!!!
6487 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
6488 * macro as that takes care of whether to read from the VMCS cache or
6489 * not.
6490 */
6491DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6492 PCPUMSELREG pSelReg)
6493{
6494 NOREF(pVCpu);
6495
6496 uint32_t u32Val = 0;
6497 int rc = VMXReadVmcs32(idxSel, &u32Val);
6498 AssertRCReturn(rc, rc);
6499 pSelReg->Sel = (uint16_t)u32Val;
6500 pSelReg->ValidSel = (uint16_t)u32Val;
6501 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6502
6503 rc = VMXReadVmcs32(idxLimit, &u32Val);
6504 AssertRCReturn(rc, rc);
6505 pSelReg->u32Limit = u32Val;
6506
6507 uint64_t u64Val = 0;
6508 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
6509 AssertRCReturn(rc, rc);
6510 pSelReg->u64Base = u64Val;
6511
6512 rc = VMXReadVmcs32(idxAccess, &u32Val);
6513 AssertRCReturn(rc, rc);
6514 pSelReg->Attr.u = u32Val;
6515
6516 /*
6517 * If VT-x marks the segment as unusable, most other bits remain undefined:
6518 * - For CS the L, D and G bits have meaning.
6519 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6520 * - For the remaining data segments no bits are defined.
6521 *
6522 * The present bit and the unusable bit have been observed to be set at the
6523 * same time (the selector was supposed to be invalid as we started executing
6524 * a V8086 interrupt in ring-0).
6525 *
6526 * What should be important for the rest of the VBox code is that the P bit is
6527 * cleared. Some of the other VBox code recognizes the unusable bit, but
6528 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6529 * safe side here, we'll strip off P and other bits we don't care about. If
6530 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6531 *
6532 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6533 */
6534 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6535 {
6536 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
6537
6538 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6539 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6540 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6541
6542 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
6543#ifdef DEBUG_bird
6544 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
6545 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6546 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6547#endif
6548 }
6549 return VINF_SUCCESS;
6550}
6551
6552
6553#ifdef VMX_USE_CACHED_VMCS_ACCESSES
6554# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6555 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6556 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6557#else
6558# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6559 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6560 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6561#endif
6562
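/*
 * For clarity, the non-cached variant of the macro above expands, for CS, to:
 */
#if 0
    rc = hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
                               VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
#endif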
6563
6564/**
6565 * Saves the guest segment registers from the current VMCS into the guest-CPU
6566 * context.
6567 *
6568 * @returns VBox status code.
6569 * @param pVCpu Pointer to the VMCPU.
6570 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6571 * out-of-sync. Make sure to update the required fields
6572 * before using them.
6573 *
6574 * @remarks No-long-jump zone!!!
6575 */
6576static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6577{
6578 /* Guest segment registers. */
6579 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6580 {
6581 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
6582 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
6583 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
6584 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
6585 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
6586 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
6587 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
6588
6589 /* Restore segment attributes for real-on-v86 mode hack. */
6590 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6591 {
6592 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6593 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6594 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6595 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6596 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6597 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6598 }
6599 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6600 }
6601
6602 return VINF_SUCCESS;
6603}
6604
6605
6606/**
6607 * Saves the guest descriptor table registers and task register from the current
6608 * VMCS into the guest-CPU context.
6609 *
6610 * @returns VBox status code.
6611 * @param pVCpu Pointer to the VMCPU.
6612 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6613 * out-of-sync. Make sure to update the required fields
6614 * before using them.
6615 *
6616 * @remarks No-long-jump zone!!!
6617 */
6618static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6619{
6620 int rc = VINF_SUCCESS;
6621
6622 /* Guest LDTR. */
6623 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6624 {
6625 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
6626 AssertRCReturn(rc, rc);
6627 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6628 }
6629
6630 /* Guest GDTR. */
6631 uint64_t u64Val = 0;
6632 uint32_t u32Val = 0;
6633 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6634 {
6635 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6636 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6637 pMixedCtx->gdtr.pGdt = u64Val;
6638 pMixedCtx->gdtr.cbGdt = u32Val;
6639 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6640 }
6641
6642 /* Guest IDTR. */
6643 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6644 {
6645 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
6646 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6647 pMixedCtx->idtr.pIdt = u64Val;
6648 pMixedCtx->idtr.cbIdt = u32Val;
6649 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6650 }
6651
6652 /* Guest TR. */
6653 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6654 {
6655 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6656 AssertRCReturn(rc, rc);
6657
6658 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
6659 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6660 {
6661 rc = VMXLOCAL_READ_SEG(TR, tr);
6662 AssertRCReturn(rc, rc);
6663 }
6664 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6665 }
6666 return rc;
6667}
6668
6669#undef VMXLOCAL_READ_SEG
6670
6671
6672/**
6673 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
6674 * context.
6675 *
6676 * @returns VBox status code.
6677 * @param pVCpu Pointer to the VMCPU.
6678 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6679 * out-of-sync. Make sure to update the required fields
6680 * before using them.
6681 *
6682 * @remarks No-long-jump zone!!!
6683 */
6684static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6685{
6686 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6687 {
6688 if (!pVCpu->hm.s.fUsingHyperDR7)
6689 {
6690 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6691 uint32_t u32Val;
6692 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
6693 pMixedCtx->dr[7] = u32Val;
6694 }
6695
6696 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6697 }
6698 return VINF_SUCCESS;
6699}
6700
6701
6702/**
6703 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
6704 *
6705 * @returns VBox status code.
6706 * @param pVCpu Pointer to the VMCPU.
6707 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6708 * out-of-sync. Make sure to update the required fields
6709 * before using them.
6710 *
6711 * @remarks No-long-jump zone!!!
6712 */
6713static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6714{
6715 NOREF(pMixedCtx);
6716
6717 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
6718 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6719 return VINF_SUCCESS;
6720}
6721
6722
6723/**
6724 * Saves the entire guest state from the currently active VMCS into the
6725 * guest-CPU context.
6726 *
6727 * This essentially VMREADs all guest-data.
6728 *
6729 * @returns VBox status code.
6730 * @param pVCpu Pointer to the VMCPU.
6731 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6732 * out-of-sync. Make sure to update the required fields
6733 * before using them.
6734 */
6735static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6736{
6737 Assert(pVCpu);
6738 Assert(pMixedCtx);
6739
6740 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6741 return VINF_SUCCESS;
6742
6743 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
6744 again on the ring-3 callback path, there is no real need to. */
6745 if (VMMRZCallRing3IsEnabled(pVCpu))
6746 VMMR0LogFlushDisable(pVCpu);
6747 else
6748 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6749 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6750
6751 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6752 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6753
6754 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6755 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6756
6757 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6758 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6759
6760 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6761 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6762
6763 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6764 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6765
6766 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6767 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6768
6769 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6770 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6771
6772 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6773 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6774
6775 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6776 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6777
6778 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6779 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6780
6781 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6782 ("Missed guest state bits while saving state; residue %RX32\n", HMVMXCPU_GST_VALUE(pVCpu)));
6783
6784 if (VMMRZCallRing3IsEnabled(pVCpu))
6785 VMMR0LogFlushEnable(pVCpu);
6786
6787 return VINF_SUCCESS;
6788}
6789
6790
6791/**
6792 * Saves basic guest registers needed for IEM instruction execution.
6793 *
6794 * @returns VBox status code (OR-able).
6795 * @param pVCpu Pointer to the cross context CPU data for the calling
6796 * EMT.
6797 * @param pMixedCtx Pointer to the CPU context of the guest.
6798 * @param fMemory Whether the instruction being executed operates on
6799 * memory or not. Only CR0 is synced up if clear.
6800 * @param fNeedRsp Need RSP (any instruction working on GPRs or stack).
6801 */
6802static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
6803{
6804 /*
6805 * We assume all general purpose registers other than RSP are available.
6806 *
6807 * RIP is a must, as it will be incremented or otherwise changed.
6808 *
6809 * RFLAGS are always required to figure the CPL.
6810 *
6811 * RSP isn't always required, however it's a GPR, so frequently required.
6812 *
6813 * SS and CS are the only segment registers needed if IEM doesn't do memory
6814 * access (CPL + 16/32/64-bit mode), but we can only get all segment registers.
6815 *
6816 * CR0 is always required by IEM for the CPL, while CR3 and CR4 will only
6817 * be required for memory accesses.
6818 *
6819 * Note! Before IEM dispatches an exception, it will call us to sync in everything.
6820 */
6821 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6822 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6823 if (fNeedRsp)
6824 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6825 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6826 if (!fMemory)
6827 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6828 else
6829 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6830 return rc;
6831}
6832
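/*
 * A typical (hypothetical) call from a VM-exit handler about to hand a register-only
 * instruction to IEM; a memory-accessing instruction would pass fMemory=true so that
 * CR3/CR4 get synced as well:
 */
#if 0
    int rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /* fMemory */, true /* fNeedRsp */);
    AssertRCReturn(rc, rc);
#endif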
6833
6834/**
6835 * Ensures that we've got a complete basic guest-context.
6836 *
6837 * This excludes the FPU, SSE, AVX, and similar extended state. The interface
6838 * is for the interpreter.
6839 *
6840 * @returns VBox status code.
6841 * @param pVCpu Pointer to the VMCPU of the calling EMT.
6842 * @param pMixedCtx Pointer to the guest-CPU context which may have data
6843 * needing to be synced in.
6844 * @thread EMT(pVCpu)
6845 */
6846VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6847{
6848 /* Note! Since this is only applicable to VT-x, the implementation is placed
6849 in the VT-x part of the sources instead of the generic stuff. */
6850 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
6851 return hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6852 return VINF_SUCCESS;
6853}
6854
6855
6856/**
6857 * Checks per-VM and per-VCPU force flag actions that require us to go back to
6858 * ring-3 for one reason or another.
6859 *
6860 * @returns VBox status code (information status code included).
6861 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6862 * ring-3.
6863 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6864 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6865 * interrupts)
6866 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6867 * all EMTs to be in ring-3.
6868 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6869 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return
6870 * to the EM loop.
6871 *
6872 * @param pVM Pointer to the VM.
6873 * @param pVCpu Pointer to the VMCPU.
6874 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6875 * out-of-sync. Make sure to update the required fields
6876 * before using them.
6877 */
6878static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6879{
6880 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6881
6882 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
6883 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
6884 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
6885 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6886 {
6887 /* We need the control registers now, make sure the guest-CPU context is updated. */
6888 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6889 AssertRCReturn(rc3, rc3);
6890
6891 /* Pending HM CR3 sync. */
6892 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6893 {
6894 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6895 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
6896 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
6897 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6898 }
6899
6900 /* Pending HM PAE PDPEs. */
6901 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6902 {
6903 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6904 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6905 }
6906
6907 /* Pending PGM CR3 sync. */
6908 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6909 {
6910 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6911 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6912 if (rc2 != VINF_SUCCESS)
6913 {
6914 AssertRC(rc2);
6915 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
6916 return rc2;
6917 }
6918 }
6919
6920 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6921 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6922 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6923 {
6924 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6925 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6926 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6927 return rc2;
6928 }
6929
6930 /* Pending VM request packets, such as hardware interrupts. */
6931 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6932 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6933 {
6934 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
6935 return VINF_EM_PENDING_REQUEST;
6936 }
6937
6938 /* Pending PGM pool flushes. */
6939 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6940 {
6941 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
6942 return VINF_PGM_POOL_FLUSH_PENDING;
6943 }
6944
6945 /* Pending DMA requests. */
6946 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6947 {
6948 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
6949 return VINF_EM_RAW_TO_R3;
6950 }
6951 }
6952
6953 return VINF_SUCCESS;
6954}
6955
6956
6957/**
6958 * Converts any TRPM trap into a pending HM event. This is typically used when
6959 * entering from ring-3 (not longjmp returns).
6960 *
6961 * @param pVCpu Pointer to the VMCPU.
6962 */
6963static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6964{
6965 Assert(TRPMHasTrap(pVCpu));
6966 Assert(!pVCpu->hm.s.Event.fPending);
6967
6968 uint8_t uVector;
6969 TRPMEVENT enmTrpmEvent;
6970 RTGCUINT uErrCode;
6971 RTGCUINTPTR GCPtrFaultAddress;
6972 uint8_t cbInstr;
6973
6974 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6975 AssertRC(rc);
6976
6977 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6978 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6979 if (enmTrpmEvent == TRPM_TRAP)
6980 {
6981 switch (uVector)
6982 {
6983 case X86_XCPT_NMI:
6984 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6985 break;
6986
6987 case X86_XCPT_BP:
6988 case X86_XCPT_OF:
6989 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6990 break;
6991
6992 case X86_XCPT_PF:
6993 case X86_XCPT_DF:
6994 case X86_XCPT_TS:
6995 case X86_XCPT_NP:
6996 case X86_XCPT_SS:
6997 case X86_XCPT_GP:
6998 case X86_XCPT_AC:
6999 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7000 /* no break! */
7001 default:
7002 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7003 break;
7004 }
7005 }
7006 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
7007 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7008 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
7009 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7010 else
7011 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
7012
7013 rc = TRPMResetTrap(pVCpu);
7014 AssertRC(rc);
7015 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
7016 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
7017
7018 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
7019 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
7020}
7021
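/*
 * Two more encoding examples for the conversion above (values follow from the switch
 * logic; vector and type layout per Intel spec. 24.8.3):
 */
#if 0
    /* TRPM_TRAP with X86_XCPT_NMI -> NMI type, no error code: */
    uint32_t const uNmiIntInfo = X86_XCPT_NMI
                               | (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT)
                               | VMX_EXIT_INTERRUPTION_INFO_VALID;
    /* TRPM_SOFTWARE_INT (e.g. INT 80h) -> software-interrupt type, cbInstr is needed for injection: */
    uint32_t const uSwIntInfo  = 0x80
                               | (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT)
                               | VMX_EXIT_INTERRUPTION_INFO_VALID;
    NOREF(uNmiIntInfo); NOREF(uSwIntInfo);
#endif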
7022
7023/**
7024 * Converts any pending HM event into a TRPM trap. Typically used when leaving
7025 * VT-x to execute any instruction.
7026 *
7027 * @param pVCpu Pointer to the VMCPU.
7028 */
7029static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
7030{
7031 Assert(pVCpu->hm.s.Event.fPending);
7032
7033 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7034 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
7035 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
7036 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
7037
7038 /* If a trap was already pending, we did something wrong! */
7039 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
7040
7041 TRPMEVENT enmTrapType;
7042 switch (uVectorType)
7043 {
7044 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
7045 enmTrapType = TRPM_HARDWARE_INT;
7046 break;
7047
7048 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
7049 enmTrapType = TRPM_SOFTWARE_INT;
7050 break;
7051
7052 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
7053 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
7054 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
7055 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
7056 enmTrapType = TRPM_TRAP;
7057 break;
7058
7059 default:
7060 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
7061 enmTrapType = TRPM_32BIT_HACK;
7062 break;
7063 }
7064
7065 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
7066
7067 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
7068 AssertRC(rc);
7069
7070 if (fErrorCodeValid)
7071 TRPMSetErrorCode(pVCpu, uErrorCode);
7072
7073 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
7074 && uVector == X86_XCPT_PF)
7075 {
7076 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
7077 }
7078 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
7079 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
7080 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
7081 {
7082 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
7083 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
7084 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
7085 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
7086 }
7087 pVCpu->hm.s.Event.fPending = false;
7088}
7089
7090
7091/**
7092 * Does the necessary state syncing before returning to ring-3 for any reason
7093 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
7094 *
7095 * @returns VBox status code.
7096 * @param pVM Pointer to the VM.
7097 * @param pVCpu Pointer to the VMCPU.
7098 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7099 * be out-of-sync. Make sure to update the required
7100 * fields before using them.
7101 * @param fSaveGuestState Whether to save the guest state or not.
7102 *
7103 * @remarks No-long-jmp zone!!!
7104 */
7105static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
7106{
7107 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7108 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7109
7110 RTCPUID idCpu = RTMpCpuId();
7111 Log4Func(("HostCpuId=%u\n", idCpu));
7112
7113 /*
7114 * !!! IMPORTANT !!!
7115 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
7116 */
7117
7118 /* Save the guest state if necessary. */
7119 if ( fSaveGuestState
7120 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
7121 {
7122 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7123 AssertRCReturn(rc, rc);
7124 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7125 }
7126
7127 /* Restore host FPU state if necessary and resync on next R0 reentry. */
7128 if (CPUMIsGuestFPUStateActive(pVCpu))
7129 {
7130 /* We shouldn't reload CR0 without saving it first. */
7131 if (!fSaveGuestState)
7132 {
7133 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7134 AssertRCReturn(rc, rc);
7135 }
7136 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
7137 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
7138 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
7139 }
7140
7141 /* Restore host debug registers if necessary and resync on next R0 reentry. */
7142#ifdef VBOX_STRICT
7143 if (CPUMIsHyperDebugStateActive(pVCpu))
7144 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
7145#endif
7146 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
7147 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
7148 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
7149 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
7150
7151#if HC_ARCH_BITS == 64
7152 /* Restore host-state bits that VT-x only restores partially. */
7153 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7154 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7155 {
7156 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
7157 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7158 }
7159 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7160#endif
7161
7162#if HC_ARCH_BITS == 64
7163 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7164 if ( pVM->hm.s.fAllow64BitGuests
7165 && pVCpu->hm.s.vmx.fLazyMsrs)
7166 {
7167 /* We shouldn't reload the guest MSRs without saving them first. */
7168 if (!fSaveGuestState)
7169 {
7170 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
7171 AssertRCReturn(rc, rc);
7172 }
7173 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
7174 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7175 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
7176 }
7177#endif
7178
7179 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7180 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7181
7182 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
7183 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
7184 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
7185 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
7186 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
7187 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
7188 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
7189 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7190
7191 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7192
7193 /** @todo This partially defeats the purpose of having preemption hooks.
7194 * The problem is, deregistering the hooks should be moved to a place that
7195 * lasts until the EMT is about to be destroyed, not every time we leave HM
7196 * context.
7197 */
7198 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7199 {
7200 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7201 AssertRCReturn(rc, rc);
7202
7203 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7204 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7205 }
7206 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7207 NOREF(idCpu);
7208
7209 return VINF_SUCCESS;
7210}
7211
7212
7213/**
7214 * Leaves the VT-x session.
7215 *
7216 * @returns VBox status code.
7217 * @param pVM Pointer to the VM.
7218 * @param pVCpu Pointer to the VMCPU.
7219 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7220 * out-of-sync. Make sure to update the required fields
7221 * before using them.
7222 *
7223 * @remarks No-long-jmp zone!!!
7224 */
7225DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7226{
7227 HM_DISABLE_PREEMPT();
7228 HMVMX_ASSERT_CPU_SAFE();
7229 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7230 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7231
7232 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
7233 and done this from the VMXR0ThreadCtxCallback(). */
7234 if (!pVCpu->hm.s.fLeaveDone)
7235 {
7236 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7237 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
7238 pVCpu->hm.s.fLeaveDone = true;
7239 }
7240 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7241
7242 /*
7243 * !!! IMPORTANT !!!
7244 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7245 */
7246
7247 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7248 /** @todo Deregistering here means we need to VMCLEAR always
7249 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
7250 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7251 VMMR0ThreadCtxHookDisable(pVCpu);
7252
7253 /* Leave HM context. This takes care of local init (term). */
7254 int rc = HMR0LeaveCpu(pVCpu);
7255
7256 HM_RESTORE_PREEMPT();
7257 return rc;
7258}
7259
7260
7261/**
7262 * Does the necessary state syncing before doing a longjmp to ring-3.
7263 *
7264 * @returns VBox status code.
7265 * @param pVM Pointer to the VM.
7266 * @param pVCpu Pointer to the VMCPU.
7267 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7268 * out-of-sync. Make sure to update the required fields
7269 * before using them.
7270 *
7271 * @remarks No-long-jmp zone!!!
7272 */
7273DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7274{
7275 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7276}
7277
7278
7279/**
7280 * Takes the necessary actions before going back to ring-3.
7281 *
7282 * An action requires us to go back to ring-3. This function does the necessary
7283 * steps before we can safely return to ring-3. This is not the same as longjmps
7284 * to ring-3; it is voluntary and prepares the guest so it may continue
7285 * executing outside HM (recompiler/IEM).
7286 *
7287 * @returns VBox status code.
7288 * @param pVM Pointer to the VM.
7289 * @param pVCpu Pointer to the VMCPU.
7290 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7291 * out-of-sync. Make sure to update the required fields
7292 * before using them.
7293 * @param rcExit The reason for exiting to ring-3. Can be
7294 * VINF_VMM_UNKNOWN_RING3_CALL.
7295 */
7296static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
7297{
7298 Assert(pVM);
7299 Assert(pVCpu);
7300 Assert(pMixedCtx);
7301 HMVMX_ASSERT_PREEMPT_SAFE();
7302
7303 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7304 {
7305 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7306 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7307 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7308 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7309 }
7310
7311 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7312 VMMRZCallRing3Disable(pVCpu);
7313 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
7314
7315 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7316 if (pVCpu->hm.s.Event.fPending)
7317 {
7318 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7319 Assert(!pVCpu->hm.s.Event.fPending);
7320 }
7321
7322 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
7323 and if we're injecting an event we should have a TRPM trap pending. */
7324 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", rcExit));
7325 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", rcExit));
7326
7327 /* Save guest state and restore host state bits. */
7328 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7329 AssertRCReturn(rc, rc);
7330 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7331 /* Thread-context hooks are unregistered at this point!!! */
7332
7333 /* Sync recompiler state. */
7334 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
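    /* The CPUM 'changed' flags below tell ring-3 (REM/IEM) which parts of the guest context it must
       re-fetch from pMixedCtx before it resumes executing the guest there. */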
7335 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7336 | CPUM_CHANGED_LDTR
7337 | CPUM_CHANGED_GDTR
7338 | CPUM_CHANGED_IDTR
7339 | CPUM_CHANGED_TR
7340 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7341 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7342 if ( pVM->hm.s.fNestedPaging
7343 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7344 {
7345 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7346 }
7347
7348 Assert(!pVCpu->hm.s.fClearTrapFlag);
7349
7350 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7351 if (rcExit != VINF_EM_RAW_INTERRUPT)
7352 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7353
7354 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7355
7356 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7357 VMMRZCallRing3RemoveNotification(pVCpu);
7358 VMMRZCallRing3Enable(pVCpu);
7359
7360 return rc;
7361}
7362
7363
7364/**
7365 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7366 * longjump to ring-3 and possibly get preempted.
7367 *
7368 * @returns VBox status code.
7369 * @param pVCpu Pointer to the VMCPU.
7370 * @param enmOperation The operation causing the ring-3 longjump.
7371 * @param pvUser Opaque pointer to the guest-CPU context. The data
7372 * may be out-of-sync. Make sure to update the required
7373 * fields before using them.
7374 */
7375DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7376{
7377 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7378 {
7379 /*
7380 * !!! IMPORTANT !!!
7381 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
7382 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
7383 */
7384 VMMRZCallRing3RemoveNotification(pVCpu);
7385 VMMRZCallRing3Disable(pVCpu);
7386 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
7387 RTThreadPreemptDisable(&PreemptState);
7388
7389 PVM pVM = pVCpu->CTX_SUFF(pVM);
7390 if (CPUMIsGuestFPUStateActive(pVCpu))
7391 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7392
7393 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7394
7395#if HC_ARCH_BITS == 64
7396 /* Restore host-state bits that VT-x only restores partially. */
7397 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7398 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7399 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7400 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7401
7402 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7403 if ( pVM->hm.s.fAllow64BitGuests
7404 && pVCpu->hm.s.vmx.fLazyMsrs)
7405 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7406#endif
7407 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7408 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7409 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7410 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7411 {
7412 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7413 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7414 }
7415
7416 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7417 VMMR0ThreadCtxHookDisable(pVCpu);
7418 HMR0LeaveCpu(pVCpu);
7419 RTThreadPreemptRestore(&PreemptState);
7420 return VINF_SUCCESS;
7421 }
7422
7423 Assert(pVCpu);
7424 Assert(pvUser);
7425 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7426 HMVMX_ASSERT_PREEMPT_SAFE();
7427
7428 VMMRZCallRing3Disable(pVCpu);
7429 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7430
7431 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
7432 enmOperation));
7433
7434 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7435 AssertRCReturn(rc, rc);
7436
7437 VMMRZCallRing3Enable(pVCpu);
7438 return VINF_SUCCESS;
7439}
7440
7441
7442/**
7443 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7444 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7445 *
7446 * @param pVCpu Pointer to the VMCPU.
7447 */
7448DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7449{
7450 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7451 {
7452 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7453 {
7454 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7455 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7456 AssertRC(rc);
7457 Log4(("Setup interrupt-window exiting\n"));
7458 }
7459 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7460}
7461
7462
7463/**
7464 * Clears the interrupt-window exiting control in the VMCS.
7465 *
7466 * @param pVCpu Pointer to the VMCPU.
7467 */
7468DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7469{
7470 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7471 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7472 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7473 AssertRC(rc);
7474 Log4(("Cleared interrupt-window exiting\n"));
7475}
7476
7477
7478/**
7479 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7480 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7481 *
7482 * @param pVCpu Pointer to the VMCPU.
7483 */
7484DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7485{
7486 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7487 {
7488 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7489 {
7490 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7491 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7492 AssertRC(rc);
7493 Log4(("Setup NMI-window exiting\n"));
7494 }
7495 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7496}
7497
7498
7499/**
7500 * Clears the NMI-window exiting control in the VMCS.
7501 *
7502 * @param pVCpu Pointer to the VMCPU.
7503 */
7504DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7505{
7506 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7507 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7508 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7509 AssertRC(rc);
7510 Log4(("Cleared NMI-window exiting\n"));
7511}
7512
7513
7514/**
7515 * Evaluates the event to be delivered to the guest and sets it as the pending
7516 * event.
7517 *
7518 * @param pVCpu Pointer to the VMCPU.
7519 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7520 * out-of-sync. Make sure to update the required fields
7521 * before using them.
7522 */
7523static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7524{
7525 Assert(!pVCpu->hm.s.Event.fPending);
7526
7527 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7528 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
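    /* In the interruptibility-state field, bit 0 = blocking by STI, bit 1 = blocking by MOV SS,
       bit 2 = blocking by SMI and bit 3 = blocking by NMI. See Intel spec. 24.4.2 "Guest Non-Register State". */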
7529 bool const fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7530 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7531 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7532
7533 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7534    Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
7535 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7536 Assert(!TRPMHasTrap(pVCpu));
7537
7538 /*
7539 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7540 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7541 */
7542 /** @todo SMI. SMIs take priority over NMIs. */
7543 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7544 {
7545 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7546 if ( !fBlockNmi
7547 && !fBlockSti
7548 && !fBlockMovSS)
7549 {
7550 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
7551 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7552 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
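            /* For illustration: with vector 2 (NMI) in bits 7:0, type 2 (NMI) in bits 10:8 and the
               valid bit (31) set, u32IntInfo ends up as 0x80000202. See Intel spec. 24.8.3 "VM-Entry
               Controls for Event Injection" for the field layout. */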
7553
7554 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7555 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7556 }
7557 else
7558 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7559 }
7560 /*
7561 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
7562 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt.
7563 */
7564 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7565 && !pVCpu->hm.s.fSingleInstruction)
7566 {
7567 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7568 AssertRC(rc);
7569 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7570 if ( !fBlockInt
7571 && !fBlockSti
7572 && !fBlockMovSS)
7573 {
7574 uint8_t u8Interrupt;
7575 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7576 if (RT_SUCCESS(rc))
7577 {
7578 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7579 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
7580 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7581
7582                hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7583 }
7584 else
7585 {
7586 /** @todo Does this actually happen? If not turn it into an assertion. */
7587 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7588 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7589 }
7590 }
7591 else
7592 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7593 }
7594}
7595
7596
7597/**
7598 * Sets a pending-debug exception to be delivered to the guest if the guest is
7599 * single-stepping.
7600 *
7601 * @param pVCpu Pointer to the VMCPU.
7602 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7603 * out-of-sync. Make sure to update the required fields
7604 * before using them.
7605 */
7606DECLINLINE(void) hmR0VmxSetPendingDebugXcpt(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7607{
7608 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7609 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
7610 {
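        /* VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS is the BS (single-step) bit, bit 14, of the guest
           pending debug exceptions field. See Intel spec. 24.4.2 "Guest Non-Register State". */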
7611 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7612 AssertRC(rc);
7613 }
7614}
7615
7616
7617/**
7618 * Injects any pending events into the guest if the guest is in a state to
7619 * receive them.
7620 *
7621 * @returns VBox status code (informational status codes included).
7622 * @param pVCpu Pointer to the VMCPU.
7623 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7624 * out-of-sync. Make sure to update the required fields
7625 * before using them.
7626 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should
7627 * return VINF_EM_DBG_STEPPED if the event was
7628 * dispatched directly.
7629 */
7630static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
7631{
7632 HMVMX_ASSERT_PREEMPT_SAFE();
7633 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7634
7635 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7636 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7637 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7638 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7639
7640 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7641    Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
7642 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7643 Assert(!TRPMHasTrap(pVCpu));
7644
7645 int rc = VINF_SUCCESS;
7646 if (pVCpu->hm.s.Event.fPending)
7647 {
7648 /*
7649 * Clear any interrupt-window exiting control if we're going to inject an interrupt. Saves one extra
7650         * VM-exit in situations where we previously set up interrupt-window exiting but got other VM-exits and
7651 * ended up enabling interrupts outside VT-x.
7652 */
7653 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7654 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7655 && uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7656 {
7657 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7658 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7659 }
7660
7661#ifdef VBOX_STRICT
7662 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7663 {
7664 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7665 Assert(!fBlockInt);
7666 Assert(!fBlockSti);
7667 Assert(!fBlockMovSS);
7668 }
7669 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7670 {
7671 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7672 Assert(!fBlockSti);
7673 Assert(!fBlockMovSS);
7674 Assert(!fBlockNmi);
7675 }
7676#endif
7677 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7678 (uint8_t)uIntType));
7679 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7680 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, &uIntrState);
7681 AssertRCReturn(rc, rc);
7682
7683 /* Update the interruptibility-state as it could have been changed by
7684 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7685 fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7686 fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7687
7688#ifdef VBOX_WITH_STATISTICS
7689 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7690 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7691 else
7692 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7693#endif
7694 }
7695
7696 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7697 if ( fBlockSti
7698 || fBlockMovSS)
7699 {
7700 if ( !pVCpu->hm.s.fSingleInstruction
7701 && !DBGFIsStepping(pVCpu))
7702 {
7703 /*
7704 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7705 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7706 * See Intel spec. 27.3.4 "Saving Non-Register State".
7707 */
7708 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7709 AssertRCReturn(rc2, rc2);
7710 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
7711 }
7712 else if (pMixedCtx->eflags.Bits.u1TF)
7713 {
7714 /*
7715 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7716 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7717 */
7718 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7719 uIntrState = 0;
7720 }
7721 }
7722
7723 /*
7724 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7725 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7726 */
7727 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7728 AssertRC(rc2);
7729
7730 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
7731 NOREF(fBlockMovSS); NOREF(fBlockSti);
7732 return rc;
7733}
7734
7735
7736/**
7737 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
7738 *
7739 * @param pVCpu Pointer to the VMCPU.
7740 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7741 * out-of-sync. Make sure to update the required fields
7742 * before using them.
7743 */
7744DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7745{
7746 NOREF(pMixedCtx);
7747 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7748 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7749}
7750
7751
7752/**
7753 * Injects a double-fault (#DF) exception into the VM.
7754 *
7755 * @returns VBox status code (informational status code included).
7756 * @param pVCpu Pointer to the VMCPU.
7757 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7758 * out-of-sync. Make sure to update the required fields
7759 * before using them.
7760 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7761 * and should return VINF_EM_DBG_STEPPED if the event
7762 * is injected directly (register modified by us, not
7763 * by hardware on VM-entry).
7764 * @param puIntrState Pointer to the current guest interruptibility-state.
7765 * This interruptibility-state will be updated if
7766 *                          necessary. This cannot be NULL.
7767 */
7768DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
7769{
7770 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7771 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7772 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7773 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7774 fStepping, puIntrState);
7775}
7776
7777
7778/**
7779 * Sets a debug (#DB) exception as pending-for-injection into the VM.
7780 *
7781 * @param pVCpu Pointer to the VMCPU.
7782 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7783 * out-of-sync. Make sure to update the required fields
7784 * before using them.
7785 */
7786DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7787{
7788 NOREF(pMixedCtx);
7789 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7790 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7791 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7792}
7793
7794
7795/**
7796 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
7797 *
7798 * @param pVCpu Pointer to the VMCPU.
7799 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7800 * out-of-sync. Make sure to update the required fields
7801 * before using them.
7802 * @param cbInstr The value of RIP that is to be pushed on the guest
7803 * stack.
7804 */
7805DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7806{
7807 NOREF(pMixedCtx);
7808 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7809 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7810 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7811}
7812
7813
7814/**
7815 * Injects a general-protection (#GP) fault into the VM.
7816 *
7817 * @returns VBox status code (informational status code included).
7818 * @param pVCpu Pointer to the VMCPU.
7819 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7820 * out-of-sync. Make sure to update the required fields
7821 * before using them.
7822 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7823 * mode, i.e. in real-mode it's not valid).
7824 * @param u32ErrorCode The error code associated with the #GP.
7825 * @param fStepping Whether we're running in
7826 * hmR0VmxRunGuestCodeStep() and should return
7827 * VINF_EM_DBG_STEPPED if the event is injected
7828 * directly (register modified by us, not by
7829 * hardware on VM-entry).
7830 * @param puIntrState Pointer to the current guest interruptibility-state.
7831 * This interruptibility-state will be updated if
7832 *                          necessary. This cannot be NULL.
7833 */
7834DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7835 bool fStepping, uint32_t *puIntrState)
7836{
7837 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7838 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7839 if (fErrorCodeValid)
7840 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7841 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7842 fStepping, puIntrState);
7843}
7844
7845
7846/**
7847 * Sets a general-protection (#GP) exception as pending-for-injection into the
7848 * VM.
7849 *
7850 * @param pVCpu Pointer to the VMCPU.
7851 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7852 * out-of-sync. Make sure to update the required fields
7853 * before using them.
7854 * @param u32ErrorCode The error code associated with the #GP.
7855 */
7856DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7857{
7858 NOREF(pMixedCtx);
7859 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7860 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7861 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7862 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7863}
7864
7865
7866/**
7867 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7868 *
7869 * @param pVCpu Pointer to the VMCPU.
7870 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7871 * out-of-sync. Make sure to update the required fields
7872 * before using them.
7873 * @param uVector The software interrupt vector number.
7874 * @param cbInstr The value of RIP that is to be pushed on the guest
7875 * stack.
7876 */
7877DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7878{
7879 NOREF(pMixedCtx);
7880 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
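    /* INT3 (#BP) and INTO (#OF) must be injected as software exceptions; any other INT n is injected
       as a software interrupt. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". */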
7881 if ( uVector == X86_XCPT_BP
7882 || uVector == X86_XCPT_OF)
7883 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7884 else
7885 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7886 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7887}
7888
7889
7890/**
7891 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7892 * stack.
7893 *
7894 * @returns VBox status code (informational status code included).
7895 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7896 * @param pVM Pointer to the VM.
7897 * @param pMixedCtx Pointer to the guest-CPU context.
7898 * @param uValue The value to push to the guest stack.
7899 */
7900DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7901{
7902 /*
7903 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7904 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7905 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7906 */
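    /* With SP=1 the 16-bit push below would wrap in the middle of the value; rather than emulate
       that corner case we treat it as a triple fault and return VINF_EM_RESET. */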
7907 if (pMixedCtx->sp == 1)
7908 return VINF_EM_RESET;
7909 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7910 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7911 AssertRCReturn(rc, rc);
7912 return rc;
7913}
7914
7915
7916/**
7917 * Injects an event into the guest upon VM-entry by updating the relevant fields
7918 * in the VM-entry area in the VMCS.
7919 *
7920 * @returns VBox status code (informational error codes included).
7921 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7922 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7923 *
7924 * @param pVCpu Pointer to the VMCPU.
7925 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7926 * be out-of-sync. Make sure to update the required
7927 * fields before using them.
7928 * @param u64IntInfo The VM-entry interruption-information field.
7929 * @param cbInstr The VM-entry instruction length in bytes (for
7930 * software interrupts, exceptions and privileged
7931 * software exceptions).
7932 * @param u32ErrCode The VM-entry exception error code.
7933 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
7934 * @param puIntrState Pointer to the current guest interruptibility-state.
7935 * This interruptibility-state will be updated if
7936 *                          necessary. This cannot be NULL.
7937 * @param fStepping Whether we're running in
7938 * hmR0VmxRunGuestCodeStep() and should return
7939 * VINF_EM_DBG_STEPPED if the event is injected
7940 * directly (register modified by us, not by
7941 * hardware on VM-entry).
7942 *
7943 * @remarks Requires CR0!
7944 * @remarks No-long-jump zone!!!
7945 */
7946static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7947 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *puIntrState)
7948{
7949 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7950 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
7951 Assert(puIntrState);
7952 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7953
7954 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7955 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7956
7957#ifdef VBOX_STRICT
7958 /* Validate the error-code-valid bit for hardware exceptions. */
7959 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
7960 {
7961 switch (uVector)
7962 {
7963 case X86_XCPT_PF:
7964 case X86_XCPT_DF:
7965 case X86_XCPT_TS:
7966 case X86_XCPT_NP:
7967 case X86_XCPT_SS:
7968 case X86_XCPT_GP:
7969 case X86_XCPT_AC:
7970 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7971 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7972 /* fallthru */
7973 default:
7974 break;
7975 }
7976 }
7977#endif
7978
7979 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7980 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7981 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7982
7983 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7984
7985 /* We require CR0 to check if the guest is in real-mode. */
7986 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7987 AssertRCReturn(rc, rc);
7988
7989 /*
7990 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
7991 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
7992 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
7993 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7994 */
7995 if (CPUMIsGuestInRealModeEx(pMixedCtx))
7996 {
7997 PVM pVM = pVCpu->CTX_SUFF(pVM);
7998 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
7999 {
8000 Assert(PDMVmmDevHeapIsEnabled(pVM));
8001 Assert(pVM->hm.s.vmx.pRealModeTSS);
8002
8003 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
8004 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8005 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
8006 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
8007 AssertRCReturn(rc, rc);
8008 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
8009
8010 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
8011 size_t const cbIdtEntry = sizeof(X86IDTR16);
8012 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
8013 {
8014 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
8015 if (uVector == X86_XCPT_DF)
8016 return VINF_EM_RESET;
8017
8018 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
8019 if (uVector == X86_XCPT_GP)
8020 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
8021
8022 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
8023 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
8024 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */,
8025 fStepping, puIntrState);
8026 }
8027
8028 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
8029 uint16_t uGuestIp = pMixedCtx->ip;
8030 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
8031 {
8032 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8033                /* #BP and #OF are both benign traps; execution must resume at the next instruction. */
8034 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
8035 }
8036 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
8037 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
8038
8039 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
8040 X86IDTR16 IdtEntry;
8041 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
8042 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
8043 AssertRCReturn(rc, rc);
8044
8045 /* Construct the stack frame for the interrupt/exception handler. */
8046 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
8047 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
8048 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
8049 AssertRCReturn(rc, rc);
8050
8051 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
8052 if (rc == VINF_SUCCESS)
8053 {
8054 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
8055 pMixedCtx->rip = IdtEntry.offSel;
8056 pMixedCtx->cs.Sel = IdtEntry.uSel;
8057 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
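                /* Note: cbIdtEntry is sizeof(X86IDTR16), i.e. 4, so shifting the selector left by it
                   multiplies it by 16, which is exactly the real-mode segment base for the new CS. */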
8058 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
8059 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
8060 && uVector == X86_XCPT_PF)
8061 pMixedCtx->cr2 = GCPtrFaultAddress;
8062
8063 /* If any other guest-state bits are changed here, make sure to update
8064 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
8065 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
8066 | HM_CHANGED_GUEST_RIP
8067 | HM_CHANGED_GUEST_RFLAGS
8068 | HM_CHANGED_GUEST_RSP);
8069
8070 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
8071 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8072 {
8073 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
8074 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
8075 Log4(("Clearing inhibition due to STI.\n"));
8076 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
8077 }
8078 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
8079 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
8080
8081 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
8082 it, if we are returning to ring-3 before executing guest code. */
8083 pVCpu->hm.s.Event.fPending = false;
8084
8085 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
8086 if (fStepping)
8087 rc = VINF_EM_DBG_STEPPED;
8088 }
8089 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
8090 return rc;
8091 }
8092
8093 /*
8094         * When unrestricted guest execution is enabled and the guest is in real mode, we must not set the deliver-error-code bit.
8095 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
8096 */
8097 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
8098 }
8099
8100 /* Validate. */
8101 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
8102 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
8103 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
8104
8105 /* Inject. */
8106 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
8107 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
8108 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
8109 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
8110
8111 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
8112 && uVector == X86_XCPT_PF)
8113 pMixedCtx->cr2 = GCPtrFaultAddress;
8114
8115 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
8116 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
8117
8118 AssertRCReturn(rc, rc);
8119 return rc;
8120}
8121
8122
8123/**
8124 * Clears the interrupt-window and NMI-window exiting controls in the VMCS and,
8125 * if necessary, clears the current event in the VMCS as well.
8126 *
8128 * @param pVCpu Pointer to the VMCPU.
8129 *
8130 * @remarks Use this function only to clear events that have not yet been
8131 * delivered to the guest but are injected in the VMCS!
8132 * @remarks No-long-jump zone!!!
8133 */
8134static void hmR0VmxClearEventVmcs(PVMCPU pVCpu)
8135{
8136 int rc;
8137 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));
8138
8139 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
8140 hmR0VmxClearIntWindowExitVmcs(pVCpu);
8141
8142 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
8143 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
8144
8145 if (!pVCpu->hm.s.Event.fPending)
8146 return;
8147
8148#ifdef VBOX_STRICT
8149 uint32_t u32EntryInfo;
8150 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
8151 AssertRC(rc);
8152 Assert(VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo));
8153#endif
8154
8155 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
8156 AssertRC(rc);
8157
8158 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
8159 AssertRC(rc);
8160
8161 /* We deliberately don't clear "hm.s.Event.fPending" here, it's taken
8162 care of in hmR0VmxExitToRing3() converting the pending event to TRPM. */
8163}
8164
8165
8166/**
8167 * Enters the VT-x session.
8168 *
8169 * @returns VBox status code.
8170 * @param pVM Pointer to the VM.
8171 * @param pVCpu Pointer to the VMCPU.
8172 * @param pCpu Pointer to the CPU info struct.
8173 */
8174VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
8175{
8176 AssertPtr(pVM);
8177 AssertPtr(pVCpu);
8178 Assert(pVM->hm.s.vmx.fSupported);
8179 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8180 NOREF(pCpu); NOREF(pVM);
8181
8182 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8183 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8184
8185#ifdef VBOX_STRICT
8186 /* Make sure we're in VMX root mode. */
8187 RTCCUINTREG u32HostCR4 = ASMGetCR4();
8188 if (!(u32HostCR4 & X86_CR4_VMXE))
8189 {
8190 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
8191 return VERR_VMX_X86_CR4_VMXE_CLEARED;
8192 }
8193#endif
8194
8195 /*
8196 * Load the VCPU's VMCS as the current (and active) one.
8197 */
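    /* VMXActivateVmcs issues a VMPTRLD, which makes this VMCS both current and active on this host CPU. */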
8198 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
8199 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8200 if (RT_FAILURE(rc))
8201 return rc;
8202
8203 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8204 pVCpu->hm.s.fLeaveDone = false;
8205 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8206
8207 return VINF_SUCCESS;
8208}
8209
8210
8211/**
8212 * The thread-context callback (only on platforms which support it).
8213 *
8214 * @param enmEvent The thread-context event.
8215 * @param pVCpu Pointer to the VMCPU.
8216 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
8217 * @thread EMT(pVCpu)
8218 */
8219VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8220{
8221 NOREF(fGlobalInit);
8222
8223 switch (enmEvent)
8224 {
8225 case RTTHREADCTXEVENT_OUT:
8226 {
8227 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8228 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8229 VMCPU_ASSERT_EMT(pVCpu);
8230
8231 PVM pVM = pVCpu->CTX_SUFF(pVM);
8232 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
8233
8234 /* No longjmps (logger flushes, locks) in this fragile context. */
8235 VMMRZCallRing3Disable(pVCpu);
8236 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8237
8238 /*
8239 * Restore host-state (FPU, debug etc.)
8240 */
8241 if (!pVCpu->hm.s.fLeaveDone)
8242 {
8243 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are
8244                   holding the PGM lock while saving the guest state (see hmR0VmxSaveGuestControlRegs())). */
8245 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
8246 pVCpu->hm.s.fLeaveDone = true;
8247 }
8248
8249 /* Leave HM context, takes care of local init (term). */
8250 int rc = HMR0LeaveCpu(pVCpu);
8251 AssertRC(rc); NOREF(rc);
8252
8253 /* Restore longjmp state. */
8254 VMMRZCallRing3Enable(pVCpu);
8255 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
8256 break;
8257 }
8258
8259 case RTTHREADCTXEVENT_IN:
8260 {
8261 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8262 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8263 VMCPU_ASSERT_EMT(pVCpu);
8264
8265 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8266 VMMRZCallRing3Disable(pVCpu);
8267 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8268
8269 /* Initialize the bare minimum state required for HM. This takes care of
8270 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8271 int rc = HMR0EnterCpu(pVCpu);
8272 AssertRC(rc);
8273 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8274
8275 /* Load the active VMCS as the current one. */
8276 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8277 {
8278 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8279 AssertRC(rc); NOREF(rc);
8280 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8281 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8282 }
8283 pVCpu->hm.s.fLeaveDone = false;
8284
8285 /* Restore longjmp state. */
8286 VMMRZCallRing3Enable(pVCpu);
8287 break;
8288 }
8289
8290 default:
8291 break;
8292 }
8293}
8294
8295
8296/**
8297 * Saves the host state in the VMCS host-state.
8298 * Sets up the VM-exit MSR-load area.
8299 *
8300 * The CPU state will be loaded from these fields on every successful VM-exit.
8301 *
8302 * @returns VBox status code.
8303 * @param pVM Pointer to the VM.
8304 * @param pVCpu Pointer to the VMCPU.
8305 *
8306 * @remarks No-long-jump zone!!!
8307 */
8308static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8309{
8310 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8311
8312 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8313 return VINF_SUCCESS;
8314
8315 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8316 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8317
8318 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8319 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8320
8321 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8322 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8323
8324 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8325 return rc;
8326}
8327
8328
8329/**
8330 * Saves the host state in the VMCS host-state.
8331 *
8332 * @returns VBox status code.
8333 * @param pVM Pointer to the VM.
8334 * @param pVCpu Pointer to the VMCPU.
8335 *
8336 * @remarks No-long-jump zone!!!
8337 */
8338VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8339{
8340 AssertPtr(pVM);
8341 AssertPtr(pVCpu);
8342
8343 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8344
8345 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
8346 and have to resave the host state but most of the time we won't be, so do it here before we disable interrupts. */
8347 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8348 return hmR0VmxSaveHostState(pVM, pVCpu);
8349}
8350
8351
8352/**
8353 * Loads the guest state into the VMCS guest-state area.
8354 *
8355 * This will typically be done before VM-entry when the guest-CPU state and the
8356 * VMCS state may potentially be out of sync.
8357 *
8358 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
8359 * VM-entry controls.
8360 * Sets up the appropriate VMX non-root function to execute guest code based on
8361 * the guest CPU mode.
8362 *
8363 * @returns VBox status code.
8364 * @param pVM Pointer to the VM.
8365 * @param pVCpu Pointer to the VMCPU.
8366 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8367 * out-of-sync. Make sure to update the required fields
8368 * before using them.
8369 *
8370 * @remarks No-long-jump zone!!!
8371 */
8372static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8373{
8374 AssertPtr(pVM);
8375 AssertPtr(pVCpu);
8376 AssertPtr(pMixedCtx);
8377 HMVMX_ASSERT_PREEMPT_SAFE();
8378
8379 VMMRZCallRing3Disable(pVCpu);
8380 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8381
8382 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8383
8384 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8385
8386 /* Determine real-on-v86 mode. */
8387 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8388 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
8389 && CPUMIsGuestInRealModeEx(pMixedCtx))
8390 {
8391 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8392 }
8393
8394 /*
8395 * Load the guest-state into the VMCS.
8396 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8397 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8398 */
8399 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8400 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8401
8402 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8403 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8404 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8405
8406 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8407 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8408    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8409
8410 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8411 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8412
8413 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8414 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8415
8416 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
8417 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8418 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8419
8420 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
8421 determine we don't have to swap EFER after all. */
8422 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8423    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8424
8425 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8426 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8427
8428 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
8429 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8430
8431 /*
8432 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
8433 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
8434 */
8435 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8436 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8437
8438 /* Clear any unused and reserved bits. */
8439 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8440
8441 VMMRZCallRing3Enable(pVCpu);
8442
8443 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8444 return rc;
8445}
8446
8447
8448/**
8449 * Loads the state shared between the host and guest into the VMCS.
8450 *
8451 * @param pVM Pointer to the VM.
8452 * @param pVCpu Pointer to the VMCPU.
8453 * @param pCtx Pointer to the guest-CPU context.
8454 *
8455 * @remarks No-long-jump zone!!!
8456 */
8457static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8458{
8459 NOREF(pVM);
8460
8461 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8462 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8463
8464 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8465 {
8466 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8467 AssertRC(rc);
8468 }
8469
8470 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8471 {
8472 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8473 AssertRC(rc);
8474
8475 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8476 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8477 {
8478 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8479 AssertRC(rc);
8480 }
8481 }
8482
8483 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8484 {
8485#if HC_ARCH_BITS == 64
8486 if (pVM->hm.s.fAllow64BitGuests)
8487 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8488#endif
8489 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8490 }
8491
8492 /* Loading CR0, debug state might have changed intercepts, update VMCS. */
8493 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
8494 {
8495 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8496 AssertRC(rc);
8497 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
8498 }
8499
8500 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8501 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8502}
8503
8504
8505/**
8506 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8507 *
8508 * @param pVM Pointer to the VM.
8509 * @param pVCpu Pointer to the VMCPU.
8510 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8511 * out-of-sync. Make sure to update the required fields
8512 * before using them.
8513 */
8514DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8515{
8516 HMVMX_ASSERT_PREEMPT_SAFE();
8517
8518 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8519#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8520 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8521#endif
8522
8523 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8524 {
8525 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8526 AssertRC(rc);
8527 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8528 }
8529 else if (HMCPU_CF_VALUE(pVCpu))
8530 {
8531 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8532 AssertRC(rc);
8533 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8534 }
8535
8536 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8537 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8538 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8539 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8540}
8541
8542
8543/**
8544 * Does the preparations before executing guest code in VT-x.
8545 *
8546 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8547 * recompiler/IEM. We must be cautious what we do here regarding committing
8548 * guest-state information into the VMCS, assuming we will actually execute the
8549 * guest in VT-x mode.
8550 *
8551 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8552 * the common-state (TRPM/forceflags), we must undo those changes so that the
8553 * recompiler/IEM can (and should) use them when it resumes guest execution.
8554 * Otherwise such operations must be done when we can no longer exit to ring-3.
8555 *
8556 * @returns Strict VBox status code.
8557 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8558 * have been disabled.
8559 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8560 * double-fault into the guest.
8561 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8562 * dispatched directly.
8563 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8564 *
8565 * @param pVM Pointer to the VM.
8566 * @param pVCpu Pointer to the VMCPU.
8567 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8568 * out-of-sync. Make sure to update the required fields
8569 * before using them.
8570 * @param pVmxTransient Pointer to the VMX transient structure.
8571 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8572 * us ignore some of the reasons for returning to
8573 * ring-3, and return VINF_EM_DBG_STEPPED if event
8574 * dispatching took place.
8575 */
8576static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8577{
8578 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8579
8580#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8581 PGMRZDynMapFlushAutoSet(pVCpu);
8582#endif
8583
8584 /* Check force flag actions that might require us to go back to ring-3. */
8585 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
8586 if (rc != VINF_SUCCESS)
8587 return rc;
8588
8589#ifndef IEM_VERIFICATION_MODE_FULL
8590    /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
8591 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8592 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8593 {
8594 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8595 RTGCPHYS GCPhysApicBase;
8596 GCPhysApicBase = pMixedCtx->msrApicBase;
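        /* The low 12 bits of the APIC-base MSR hold flags (BSP, x2APIC enable, APIC global enable);
           masking them off below yields the 4K-aligned physical base of the APIC page. */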
8597 GCPhysApicBase &= PAGE_BASE_GC_MASK;
8598
8599 /* Unalias any existing mapping. */
8600 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8601 AssertRCReturn(rc, rc);
8602
8603 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
8604 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
8605 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8606 AssertRCReturn(rc, rc);
8607
8608 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8609 }
8610#endif /* !IEM_VERIFICATION_MODE_FULL */
8611
8612 if (TRPMHasTrap(pVCpu))
8613 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8614 else if (!pVCpu->hm.s.Event.fPending)
8615 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8616
8617 /*
8618 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
8619 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
8620 */
8621 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
8622 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8623 {
8624 Assert(rc == VINF_EM_RESET || (rc == VINF_EM_DBG_STEPPED && fStepping));
8625 return rc;
8626 }
8627
8628 /*
8629 * Load the guest state bits, we can handle longjmps/getting preempted here.
8630 *
8631 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8632 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8633 * Hence, this needs to be done -after- injection of events.
8634 */
8635 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8636
8637 /*
8638 * No longjmps to ring-3 from this point on!!!
8639 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8640 * This also disables flushing of the R0-logger instance (if any).
8641 */
8642 VMMRZCallRing3Disable(pVCpu);
8643
8644 /*
8645 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
8646 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
8647 *
8648     * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
8649 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
8650 *
8651 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
8652 * executing guest code.
8653 */
8654 pVmxTransient->fEFlags = ASMIntDisableFlags();
8655 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8656 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8657 && ( !fStepping /* Optimized for the non-stepping case, of course. */
8658 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8659 {
8660 hmR0VmxClearEventVmcs(pVCpu);
8661 ASMSetFlags(pVmxTransient->fEFlags);
8662 VMMRZCallRing3Enable(pVCpu);
8663 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8664 return VINF_EM_RAW_TO_R3;
8665 }
8666
8667 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
8668 {
8669 hmR0VmxClearEventVmcs(pVCpu);
8670 ASMSetFlags(pVmxTransient->fEFlags);
8671 VMMRZCallRing3Enable(pVCpu);
8672 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8673 return VINF_EM_RAW_INTERRUPT;
8674 }
8675
8676 /* We've injected any pending events. This is really the point of no return (to ring-3). */
8677 pVCpu->hm.s.Event.fPending = false;
8678
8679 return VINF_SUCCESS;
8680}
8681
8682
8683/**
8684 * Prepares to run guest code in VT-x and we've committed to doing so. This
8685 * means there is no backing out to ring-3 or anywhere else at this
8686 * point.
8687 *
8688 * @param pVM Pointer to the VM.
8689 * @param pVCpu Pointer to the VMCPU.
8690 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8691 * out-of-sync. Make sure to update the required fields
8692 * before using them.
8693 * @param pVmxTransient Pointer to the VMX transient structure.
8694 *
8695 * @remarks Called with preemption disabled.
8696 * @remarks No-long-jump zone!!!
8697 */
8698static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8699{
8700 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8701 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8702 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8703
8704 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8705 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8706
8707#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8708 if (!CPUMIsGuestFPUStateActive(pVCpu))
8709 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8710 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8711#endif
8712
8713 if ( pVCpu->hm.s.fPreloadGuestFpu
8714 && !CPUMIsGuestFPUStateActive(pVCpu))
8715 {
8716 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8717 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8718 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8719 }
8720
8721 /*
8722     * Lazy-update of the host MSR values in the auto-load/store MSR area.
8723 */
8724 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8725 && pVCpu->hm.s.vmx.cMsrs > 0)
8726 {
8727 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8728 }
8729
8730 /*
8731 * Load the host state bits as we may've been preempted (only happens when
8732 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
8733 */
8734 /** @todo Why should hmR0VmxSetupVMRunHandler() changing pfnStartVM have
8735     *        any effect on the host state needing to be saved? */
8736 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8737 {
8738 /* This ASSUMES that pfnStartVM has been set up already. */
8739 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8740 AssertRC(rc);
8741 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptSaveHostState);
8742 }
8743 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8744
8745 /*
8746 * Load the state shared between host and guest (FPU, debug, lazy MSRs).
8747 */
8748 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8749 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8750 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8751
8752 /* Store status of the shared guest-host state at the time of VM-entry. */
8753#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8754 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8755 {
8756 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8757 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8758 }
8759 else
8760#endif
8761 {
8762 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8763 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8764 }
8765 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8766
8767 /*
8768 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8769 */
8770 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8771 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
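    /* Note: offset 0x80 into the virtual-APIC page is the VTPR field, so u8GuestTpr snapshots the TPR
       value the guest saw at VM-entry; hmR0VmxPostRunGuest() compares the page against this cached
       value to detect whether the guest changed its TPR while the TPR shadow was active. */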
8772
8773 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
8774 RTCPUID idCurrentCpu = pCpu->idCpu;
8775 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8776 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8777 {
8778 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
8779 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8780 }
8781
8782 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
8783 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8784 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8785 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8786
8787 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8788
8789 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8790 to start executing. */
8791
8792 /*
8793 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8794 */
8795 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8796 {
8797 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8798 {
8799 bool fMsrUpdated;
8800 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8801 AssertRC(rc2);
8802 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8803
8804 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
8805 &fMsrUpdated);
8806 AssertRC(rc2);
8807 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8808
8809 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8810 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8811 }
8812 else
8813 {
8814 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8815 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8816 }
8817 }
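    /* Rationale: when RDTSCP is exposed and RDTSC exiting is off, the guest reads IA32_TSC_AUX directly
       without causing a VM-exit, so the guest value has to be swapped in via the auto-load/store MSR
       area (with the host value restored on exit); once we intercept RDTSC/RDTSCP again the MSR can be
       dropped from the area. */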
8818
8819#ifdef VBOX_STRICT
8820 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8821 hmR0VmxCheckHostEferMsr(pVCpu);
8822 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8823#endif
8824#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8825 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8826 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8827 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8828#endif
8829}
8830
8831
8832/**
8833 * Performs some essential restoration of state after running guest code in
8834 * VT-x.
8835 *
8836 * @param pVM Pointer to the VM.
8837 * @param pVCpu Pointer to the VMCPU.
8838 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
8839 * out-of-sync. Make sure to update the required fields
8840 * before using them.
8841 * @param pVmxTransient Pointer to the VMX transient structure.
8842 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8843 *
8844 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8845 *
8846 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8847 * unconditionally when it is safe to do so.
8848 */
8849static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8850{
8851 NOREF(pVM);
8852
8853 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8854
8855 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
8856 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
8857 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8858 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8859 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8860 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8861
8862 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8863 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);
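    /* With RDTSC exiting disabled the guest has been reading host-TSC + u64TSCOffset directly, so record
       that as the last TSC value the guest could have observed; this lets TM make sure the virtualized
       TSC doesn't appear to jump backwards later. */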
8864
8865 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8866 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8867 Assert(!ASMIntAreEnabled());
8868 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8869
8870#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8871 if (CPUMIsGuestFPUStateActive(pVCpu))
8872 {
8873 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8874 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8875 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8876 }
8877#endif
8878
8879#if HC_ARCH_BITS == 64
8880 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8881#endif
8882 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8883#ifdef VBOX_STRICT
8884 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8885#endif
8886 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
8887 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8888
8889 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8890 uint32_t uExitReason;
8891 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8892 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8893 AssertRC(rc);
8894 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
8895 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
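    /* Note: only bits 15:0 of the exit-reason field hold the basic exit reason; the upper bits carry
       flags such as the VM-entry failure bit (bit 31), which is why VMX_EXIT_REASON_BASIC() keeps just
       the low 16 bits before the value is used to dispatch the exit handlers. */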
8896
8897 /* Update the VM-exit history array. */
8898 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmxTransient->uExitReason);
8899
8900 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
8901 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
8902 {
8903 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8904 pVmxTransient->fVMEntryFailed));
8905 return;
8906 }
8907
8908 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
8909 {
8910 /** @todo We can optimize this by only syncing with our force-flags when
8911 * really needed and keeping the VMCS state as it is for most
8912 * VM-exits. */
8913 /* Update the guest interruptibility-state from the VMCS. */
8914 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8915
8916#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8917 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8918 AssertRC(rc);
8919#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8920 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8921 AssertRC(rc);
8922#endif
8923
8924 /*
8925 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
8926     * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
8927     * why it's done here: it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
8928 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
8929 */
8930 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8931 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8932 {
8933 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8934 AssertRC(rc);
8935 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8936 }
8937 }
8938}
8939
8940
8941/**
8942 * Runs the guest code using VT-x the normal way.
8943 *
8944 * @returns VBox status code.
8945 * @param pVM Pointer to the VM.
8946 * @param pVCpu Pointer to the VMCPU.
8947 * @param pCtx Pointer to the guest-CPU context.
8948 *
8949 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8950 */
8951static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8952{
8953 VMXTRANSIENT VmxTransient;
8954 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8955 int rc = VERR_INTERNAL_ERROR_5;
8956 uint32_t cLoops = 0;
8957
8958 for (;; cLoops++)
8959 {
8960 Assert(!HMR0SuspendPending());
8961 HMVMX_ASSERT_CPU_SAFE();
8962
8963         /* Preparatory work for running guest code; this may force us to return
8964 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8965 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8966 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8967 if (rc != VINF_SUCCESS)
8968 break;
8969
8970 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8971 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8972 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8973
8974 /* Restore any residual host-state and save any bits shared between host
8975 and guest into the guest-CPU state. Re-enables interrupts! */
8976 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
8977
8978 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8979 if (RT_UNLIKELY(rc != VINF_SUCCESS))
8980 {
8981 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8982 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
8983 return rc;
8984 }
8985
8986 /* Profile the VM-exit. */
8987 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8988 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8989 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8990 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8991 HMVMX_START_EXIT_DISPATCH_PROF();
8992
8993 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8994 if (RT_UNLIKELY(VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()))
8995 {
8996 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
8997 hmR0VmxSaveGuestState(pVCpu, pCtx);
8998 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
8999 }
9000
9001 /* Handle the VM-exit. */
9002#ifdef HMVMX_USE_FUNCTION_TABLE
9003 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
9004#else
9005 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
9006#endif
9007 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
9008 if (rc != VINF_SUCCESS)
9009 break;
9010 if (cLoops > pVM->hm.s.cMaxResumeLoops)
9011 {
9012 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9013 rc = VINF_EM_RAW_INTERRUPT;
9014 break;
9015 }
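        /* Note: the cMaxResumeLoops cap bounds how long we stay in this inner loop; returning
           VINF_EM_RAW_INTERRUPT forces a round trip to ring-3, giving it a chance to process any
           pending work before the guest is resumed. */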
9016 }
9017
9018 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
9019 return rc;
9020}
9021
9022
9023/**
9024 * Single steps guest code using VT-x.
9025 *
9026 * @returns VBox status code.
9027 * @param pVM Pointer to the VM.
9028 * @param pVCpu Pointer to the VMCPU.
9029 * @param pCtx Pointer to the guest-CPU context.
9030 *
9031 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
9032 */
9033static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9034{
9035 VMXTRANSIENT VmxTransient;
9036 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
9037 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
9038 uint32_t cLoops = 0;
9039 uint16_t uCsStart = pCtx->cs.Sel;
9040 uint64_t uRipStart = pCtx->rip;
9041
9042 for (;; cLoops++)
9043 {
9044 Assert(!HMR0SuspendPending());
9045 HMVMX_ASSERT_CPU_SAFE();
9046
9047         /* Preparatory work for running guest code; this may force us to return
9048 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
9049 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
9050 rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, true /* fStepping */);
9051 if (rcStrict != VINF_SUCCESS)
9052 break;
9053
9054 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
9055 rcStrict = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
9056 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
9057
9058 /* Restore any residual host-state and save any bits shared between host
9059 and guest into the guest-CPU state. Re-enables interrupts! */
9060 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
9061
9062 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
9063 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
9064 {
9065 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
9066 hmR0VmxReportWorldSwitchError(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict), pCtx, &VmxTransient);
9067 return VBOXSTRICTRC_TODO(rcStrict);
9068 }
9069
9070 /* Profile the VM-exit. */
9071 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
9072 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
9073 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
9074 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
9075 HMVMX_START_EXIT_DISPATCH_PROF();
9076
9077 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
9078 if (RT_UNLIKELY(VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()))
9079 {
9080 hmR0VmxReadExitQualificationVmcs(pVCpu, &VmxTransient);
9081 hmR0VmxSaveGuestState(pVCpu, pCtx);
9082 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, VmxTransient.uExitReason, VmxTransient.uExitQualification);
9083 }
9084
9085 /* Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitStep(). */
9086 rcStrict = hmR0VmxHandleExitStep(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, uCsStart, uRipStart);
9087 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
9088 if (rcStrict != VINF_SUCCESS)
9089 break;
9090 if (cLoops > pVM->hm.s.cMaxResumeLoops)
9091 {
9092 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9093 rcStrict = VINF_EM_RAW_INTERRUPT;
9094 break;
9095 }
9096
9097 /*
9098          * Did the RIP change? If so, consider it a single step.
9099 * Otherwise, make sure one of the TFs gets set.
9100 */
9101 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
9102 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
9103 AssertRCReturn(rc2, rc2);
9104 if ( pCtx->rip != uRipStart
9105 || pCtx->cs.Sel != uCsStart)
9106 {
9107 rcStrict = VINF_EM_DBG_STEPPED;
9108 break;
9109 }
9110 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
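        /* Neither RIP nor CS changed, so no instruction has been completed yet; mark the guest debug
           state dirty so the single-stepping setup is reloaded before the next VM-entry. */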
9111 }
9112
9113 /*
9114 * Clear the X86_EFL_TF if necessary.
9115 */
9116 if (pVCpu->hm.s.fClearTrapFlag)
9117 {
9118 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
9119 AssertRCReturn(rc2, rc2);
9120 pVCpu->hm.s.fClearTrapFlag = false;
9121 pCtx->eflags.Bits.u1TF = 0;
9122 }
9123     /** @todo there seem to be issues with the resume flag when the monitor trap
9124     *        flag is pending without being used. Seen early in BIOS init when
9125     *        accessing the APIC page in protected mode. */
9126
9127 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
9128 return VBOXSTRICTRC_TODO(rcStrict);
9129}
9130
9131
9132/**
9133 * Runs the guest code using VT-x.
9134 *
9135 * @returns VBox status code.
9136 * @param pVM Pointer to the VM.
9137 * @param pVCpu Pointer to the VMCPU.
9138 * @param pCtx Pointer to the guest-CPU context.
9139 */
9140VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9141{
9142 Assert(VMMRZCallRing3IsEnabled(pVCpu));
9143 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
9144 HMVMX_ASSERT_PREEMPT_SAFE();
9145
9146 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
9147
9148 int rc;
9149 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
9150 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
9151 else
9152 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
9153
9154 if (rc == VERR_EM_INTERPRETER)
9155 rc = VINF_EM_RAW_EMULATE_INSTR;
9156 else if (rc == VINF_EM_RESET)
9157 rc = VINF_EM_TRIPLE_FAULT;
9158
9159 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
9160 if (RT_FAILURE(rc2))
9161 {
9162 pVCpu->hm.s.u32HMError = rc;
9163 rc = rc2;
9164 }
9165 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
9166 return rc;
9167}
9168
9169
9170#ifndef HMVMX_USE_FUNCTION_TABLE
9171DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
9172{
9173#ifdef DEBUG_ramshankar
9174# define SVVMCS() do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
9175# define LDVMCS() do { HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
9176#endif
9177 int rc;
9178 switch (rcReason)
9179 {
9180 case VMX_EXIT_EPT_MISCONFIG: /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9181 case VMX_EXIT_EPT_VIOLATION: /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9182 case VMX_EXIT_IO_INSTR: /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9183 case VMX_EXIT_CPUID: /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9184 case VMX_EXIT_RDTSC: /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9185 case VMX_EXIT_RDTSCP: /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9186 case VMX_EXIT_APIC_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9187 case VMX_EXIT_XCPT_OR_NMI: /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9188 case VMX_EXIT_MOV_CRX: /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9189 case VMX_EXIT_EXT_INT: /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9190 case VMX_EXIT_INT_WINDOW: /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9191 case VMX_EXIT_MWAIT: /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9192 case VMX_EXIT_MONITOR: /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9193 case VMX_EXIT_TASK_SWITCH: /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9194 case VMX_EXIT_PREEMPT_TIMER: /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9195 case VMX_EXIT_RDMSR: /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9196 case VMX_EXIT_WRMSR: /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9197 case VMX_EXIT_MOV_DRX: /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9198 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9199 case VMX_EXIT_HLT: /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9200 case VMX_EXIT_INVD: /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9201 case VMX_EXIT_INVLPG: /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9202 case VMX_EXIT_RSM: /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9203 case VMX_EXIT_MTF: /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9204 case VMX_EXIT_PAUSE: /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9205 case VMX_EXIT_XDTR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9206 case VMX_EXIT_TR_ACCESS: /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9207 case VMX_EXIT_WBINVD: /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9208 case VMX_EXIT_XSETBV: /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9209 case VMX_EXIT_RDRAND: /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9210 case VMX_EXIT_INVPCID: /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9211 case VMX_EXIT_GETSEC: /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9212 case VMX_EXIT_RDPMC: /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9213 case VMX_EXIT_VMCALL: /* SVVMCS(); */ rc = hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
9214
9215 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
9216 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
9217 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
9218 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
9219 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9220 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
9221 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
9222 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
9223 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
9224
9225 case VMX_EXIT_VMCLEAR:
9226 case VMX_EXIT_VMLAUNCH:
9227 case VMX_EXIT_VMPTRLD:
9228 case VMX_EXIT_VMPTRST:
9229 case VMX_EXIT_VMREAD:
9230 case VMX_EXIT_VMRESUME:
9231 case VMX_EXIT_VMWRITE:
9232 case VMX_EXIT_VMXOFF:
9233 case VMX_EXIT_VMXON:
9234 case VMX_EXIT_INVEPT:
9235 case VMX_EXIT_INVVPID:
9236 case VMX_EXIT_VMFUNC:
9237 case VMX_EXIT_XSAVES:
9238 case VMX_EXIT_XRSTORS:
9239 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
9240 break;
9241 case VMX_EXIT_RESERVED_60:
9242 case VMX_EXIT_RDSEED: /* only spurious exits, so undefined */
9243 case VMX_EXIT_RESERVED_62:
9244 default:
9245 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
9246 break;
9247 }
9248 return rc;
9249}
9250#endif /* !HMVMX_USE_FUNCTION_TABLE */
9251
9252
9253/**
9254 * Single-stepping VM-exit filtering.
9255 *
9256 * This preprocesses the VM-exits and decides whether we've gotten far enough
9257 * to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit handling is
9258 * performed.
9259 *
9260 * @returns Strict VBox status code.
9261 * @param pVCpu The virtual CPU of the calling EMT.
9262 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9263 * out-of-sync. Make sure to update the required
9264 * fields before using them.
9265 * @param pVmxTransient Pointer to the VMX-transient structure.
9266 * @param uExitReason The VM-exit reason.
9267 */
9268DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitStep(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9269 uint32_t uExitReason, uint16_t uCsStart, uint64_t uRipStart)
9270{
9271 switch (uExitReason)
9272 {
9273 case VMX_EXIT_XCPT_OR_NMI:
9274 {
9275 /* Check for host NMI. */
9276 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9277 AssertRCReturn(rc2, rc2);
9278 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9279 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9280 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9281 /* fall thru */
9282 }
9283
9284 case VMX_EXIT_EPT_MISCONFIG:
9285 case VMX_EXIT_TRIPLE_FAULT:
9286 case VMX_EXIT_APIC_ACCESS:
9287 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9288 case VMX_EXIT_TASK_SWITCH:
9289
9290 /* Instruction specific VM-exits: */
9291 case VMX_EXIT_IO_INSTR:
9292 case VMX_EXIT_CPUID:
9293 case VMX_EXIT_RDTSC:
9294 case VMX_EXIT_RDTSCP:
9295 case VMX_EXIT_MOV_CRX:
9296 case VMX_EXIT_MWAIT:
9297 case VMX_EXIT_MONITOR:
9298 case VMX_EXIT_RDMSR:
9299 case VMX_EXIT_WRMSR:
9300 case VMX_EXIT_MOV_DRX:
9301 case VMX_EXIT_HLT:
9302 case VMX_EXIT_INVD:
9303 case VMX_EXIT_INVLPG:
9304 case VMX_EXIT_RSM:
9305 case VMX_EXIT_PAUSE:
9306 case VMX_EXIT_XDTR_ACCESS:
9307 case VMX_EXIT_TR_ACCESS:
9308 case VMX_EXIT_WBINVD:
9309 case VMX_EXIT_XSETBV:
9310 case VMX_EXIT_RDRAND:
9311 case VMX_EXIT_INVPCID:
9312 case VMX_EXIT_GETSEC:
9313 case VMX_EXIT_RDPMC:
9314 case VMX_EXIT_VMCALL:
9315 case VMX_EXIT_VMCLEAR:
9316 case VMX_EXIT_VMLAUNCH:
9317 case VMX_EXIT_VMPTRLD:
9318 case VMX_EXIT_VMPTRST:
9319 case VMX_EXIT_VMREAD:
9320 case VMX_EXIT_VMRESUME:
9321 case VMX_EXIT_VMWRITE:
9322 case VMX_EXIT_VMXOFF:
9323 case VMX_EXIT_VMXON:
9324 case VMX_EXIT_INVEPT:
9325 case VMX_EXIT_INVVPID:
9326 case VMX_EXIT_VMFUNC:
9327 {
9328 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9329 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9330 AssertRCReturn(rc2, rc2);
9331 if ( pMixedCtx->rip != uRipStart
9332 || pMixedCtx->cs.Sel != uCsStart)
9333 return VINF_EM_DBG_STEPPED;
9334 break;
9335 }
9336 }
9337
9338 /*
9339 * Normal processing.
9340 */
9341#ifdef HMVMX_USE_FUNCTION_TABLE
9342 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9343#else
9344 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9345#endif
9346}
9347
9348
9349#ifdef DEBUG
9350/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
9351# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
9352 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
9353
9354# define HMVMX_ASSERT_PREEMPT_CPUID() \
9355 do \
9356 { \
9357 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
9358 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
9359 } while (0)
9360
9361# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9362 do { \
9363 AssertPtr(pVCpu); \
9364 AssertPtr(pMixedCtx); \
9365 AssertPtr(pVmxTransient); \
9366 Assert(pVmxTransient->fVMEntryFailed == false); \
9367 Assert(ASMIntAreEnabled()); \
9368 HMVMX_ASSERT_PREEMPT_SAFE(); \
9369 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
9370 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
9371 HMVMX_ASSERT_PREEMPT_SAFE(); \
9372 if (VMMR0IsLogFlushDisabled(pVCpu)) \
9373 HMVMX_ASSERT_PREEMPT_CPUID(); \
9374 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9375 } while (0)
9376
9377# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
9378 do { \
9379 Log4Func(("\n")); \
9380 } while (0)
9381#else /* Release builds */
9382# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
9383 do { \
9384 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
9385 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
9386 } while (0)
9387# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
9388#endif
9389
9390
9391/**
9392 * Advances the guest RIP after reading it from the VMCS.
9393 *
9394 * @returns VBox status code.
9395 * @param pVCpu Pointer to the VMCPU.
9396 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
9397 * out-of-sync. Make sure to update the required fields
9398 * before using them.
9399 * @param pVmxTransient Pointer to the VMX transient structure.
9400 *
9401 * @remarks No-long-jump zone!!!
9402 */
9403DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9404{
9405 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
9406 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9407 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9408 AssertRCReturn(rc, rc);
9409
9410 pMixedCtx->rip += pVmxTransient->cbInstr;
9411 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
9412
9413 /*
9414 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
9415 * pending debug exception field as it takes care of priority of events.
9416 *
9417 * See Intel spec. 32.2.1 "Debug Exceptions".
9418 */
9419 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
9420
9421 return rc;
9422}
9423
9424
9425/**
9426 * Tries to determine what part of the guest-state VT-x has deemed invalid
9427 * and updates the error record fields accordingly.
9428 *
9429 * @return VMX_IGS_* return codes.
9430 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9431 * wrong with the guest state.
9432 *
9433 * @param pVM Pointer to the VM.
9434 * @param pVCpu Pointer to the VMCPU.
9435 * @param pCtx Pointer to the guest-CPU state.
9436 *
9437 * @remarks This function assumes our cache of the VMCS controls
9438 *          is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9439 */
9440static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9441{
9442#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9443#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9444 uError = (err); \
9445 break; \
9446 } else do { } while (0)
9447
9448 int rc;
9449 uint32_t uError = VMX_IGS_ERROR;
9450 uint32_t u32Val;
9451 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9452
9453 do
9454 {
9455 /*
9456 * CR0.
9457 */
9458 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9459 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9460 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
9461 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9462 if (fUnrestrictedGuest)
9463 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
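        /* uSetCR0 holds the bits that must be 1 and uZapCR0 the bits that may be 1 (everything outside
           it must be 0), both derived from the CR0 fixed MSRs; the two checks below verify the guest
           CR0 against these masks. */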
9464
9465 uint32_t u32GuestCR0;
9466 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
9467 AssertRCBreak(rc);
9468 HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
9469 HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
9470 if ( !fUnrestrictedGuest
9471 && (u32GuestCR0 & X86_CR0_PG)
9472 && !(u32GuestCR0 & X86_CR0_PE))
9473 {
9474 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9475 }
9476
9477 /*
9478 * CR4.
9479 */
9480 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9481 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9482
9483 uint32_t u32GuestCR4;
9484 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
9485 AssertRCBreak(rc);
9486 HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
9487 HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
9488
9489 /*
9490 * IA32_DEBUGCTL MSR.
9491 */
9492 uint64_t u64Val;
9493 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9494 AssertRCBreak(rc);
9495 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9496 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9497 {
9498 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9499 }
9500 uint64_t u64DebugCtlMsr = u64Val;
9501
9502#ifdef VBOX_STRICT
9503 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9504 AssertRCBreak(rc);
9505 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
9506#endif
9507 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
9508
9509 /*
9510 * RIP and RFLAGS.
9511 */
9512 uint32_t u32Eflags;
9513#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9514 if (HMVMX_IS_64BIT_HOST_MODE())
9515 {
9516 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9517 AssertRCBreak(rc);
9518 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
9519 if ( !fLongModeGuest
9520 || !pCtx->cs.Attr.n.u1Long)
9521 {
9522 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9523 }
9524 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9525 * must be identical if the "IA-32e mode guest" VM-entry
9526 * control is 1 and CS.L is 1. No check applies if the
9527 * CPU supports 64 linear-address bits. */
9528
9529 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9530 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9531 AssertRCBreak(rc);
9532 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9533 VMX_IGS_RFLAGS_RESERVED);
9534 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9535 u32Eflags = u64Val;
9536 }
9537 else
9538#endif
9539 {
9540 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9541 AssertRCBreak(rc);
9542 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
9543 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9544 }
9545
9546 if ( fLongModeGuest
9547 || ( fUnrestrictedGuest
9548 && !(u32GuestCR0 & X86_CR0_PE)))
9549 {
9550 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9551 }
9552
9553 uint32_t u32EntryInfo;
9554 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9555 AssertRCBreak(rc);
9556 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9557 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9558 {
9559 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9560 }
9561
9562 /*
9563 * 64-bit checks.
9564 */
9565#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9566 if (HMVMX_IS_64BIT_HOST_MODE())
9567 {
9568 if (fLongModeGuest)
9569 {
9570 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9571 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9572 }
9573
9574 if ( !fLongModeGuest
9575 && (u32GuestCR4 & X86_CR4_PCIDE))
9576 {
9577 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9578 }
9579
9580 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9581 * 51:32 beyond the processor's physical-address width are 0. */
9582
9583 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
9584 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9585 {
9586 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9587 }
9588
9589 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9590 AssertRCBreak(rc);
9591 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9592
9593 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9594 AssertRCBreak(rc);
9595 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9596 }
9597#endif
9598
9599 /*
9600 * PERF_GLOBAL MSR.
9601 */
9602 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
9603 {
9604 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9605 AssertRCBreak(rc);
9606 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9607 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9608 }
9609
9610 /*
9611 * PAT MSR.
9612 */
9613 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
9614 {
9615 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9616 AssertRCBreak(rc);
9617 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
9618 for (unsigned i = 0; i < 8; i++)
9619 {
9620 uint8_t u8Val = (u64Val & 0xff);
9621 if ( u8Val != 0 /* UC */
9622 && u8Val != 1 /* WC */
9623 && u8Val != 4 /* WT */
9624 && u8Val != 5 /* WP */
9625 && u8Val != 6 /* WB */
9626 && u8Val != 7 /* UC- */)
9627 {
9628 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9629 }
9630 u64Val >>= 8;
9631 }
9632 }
9633
9634 /*
9635 * EFER MSR.
9636 */
9637 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
9638 {
9639 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9640 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9641 AssertRCBreak(rc);
9642 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9643 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9644 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
9645 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9646 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9647 || !(u32GuestCR0 & X86_CR0_PG)
9648 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
9649 VMX_IGS_EFER_LMA_LME_MISMATCH);
9650 }
9651
9652 /*
9653 * Segment registers.
9654 */
9655 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9656 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9657 if (!(u32Eflags & X86_EFL_VM))
9658 {
9659 /* CS */
9660 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9661 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9662 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9663 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9664 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9665 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9666 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9667 /* CS cannot be loaded with NULL in protected mode. */
9668 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9669 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
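            /* CS type checks: types 9/11 are non-conforming (accessed) code segments and 13/15 are
               conforming ones; type 3, a read/write accessed data segment, is only acceptable for CS
               when unrestricted guest execution is enabled. */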
9670 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9671 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9672 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9673 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9674 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9675 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9676 else
9677 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9678
9679 /* SS */
9680 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9681 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9682 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9683 if ( !(pCtx->cr0 & X86_CR0_PE)
9684 || pCtx->cs.Attr.n.u4Type == 3)
9685 {
9686 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9687 }
9688 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9689 {
9690 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9691 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9692 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9693 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9694 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9695 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9696 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9697 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9698 }
9699
9700 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
9701 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9702 {
9703 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9704 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9705 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9706 || pCtx->ds.Attr.n.u4Type > 11
9707 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9708 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9709 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9710 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9711 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9712 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9713 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9714 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9715 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9716 }
9717 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9718 {
9719 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9720 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9721 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9722 || pCtx->es.Attr.n.u4Type > 11
9723 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9724 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9725 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9726 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9727 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9728 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9729 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9730 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9731 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9732 }
9733 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9734 {
9735 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9736 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9737 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9738 || pCtx->fs.Attr.n.u4Type > 11
9739 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9740 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9741 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9742 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9743 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9744 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9745 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9746 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9747 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9748 }
9749 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9750 {
9751 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9752 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9753 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9754 || pCtx->gs.Attr.n.u4Type > 11
9755 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9756 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9757 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9758 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9759 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9760 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9761 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9762 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9763 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9764 }
9765 /* 64-bit capable CPUs. */
9766#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9767 if (HMVMX_IS_64BIT_HOST_MODE())
9768 {
9769 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9770 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9771 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9772 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9773 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9774 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9775 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9776 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9777 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9778 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9779 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9780 }
9781#endif
9782 }
9783 else
9784 {
9785 /* V86 mode checks. */
9786 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
9787 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9788 {
9789 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9790 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9791 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9792 }
9793 else
9794 {
9795 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9796 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9797 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9798 }
9799
9800 /* CS */
9801 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9802 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9803 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9804 /* SS */
9805 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9806 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9807 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9808 /* DS */
9809 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9810 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9811 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9812 /* ES */
9813 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9814 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9815 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9816 /* FS */
9817 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9818 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9819 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9820 /* GS */
9821 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9822 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9823 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9824 /* 64-bit capable CPUs. */
9825#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9826 if (HMVMX_IS_64BIT_HOST_MODE())
9827 {
9828 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9829 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9830 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9831 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9832 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9833 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
9834 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9835 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
9836 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9837 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
9838 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9839 }
9840#endif
9841 }
9842
9843 /*
9844 * TR.
9845 */
9846 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9847 /* 64-bit capable CPUs. */
9848#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9849 if (HMVMX_IS_64BIT_HOST_MODE())
9850 {
9851 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9852 }
9853#endif
9854 if (fLongModeGuest)
9855 {
9856 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9857 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9858 }
9859 else
9860 {
9861 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9862 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9863 VMX_IGS_TR_ATTR_TYPE_INVALID);
9864 }
9865 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9866 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9867 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9868 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9869 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9870 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9871 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9872 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9873
9874 /*
9875 * GDTR and IDTR.
9876 */
9877#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
9878 if (HMVMX_IS_64BIT_HOST_MODE())
9879 {
9880 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9881 AssertRCBreak(rc);
9882 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9883
9884 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9885 AssertRCBreak(rc);
9886 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9887 }
9888#endif
9889
9890 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9891 AssertRCBreak(rc);
9892 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9893
9894 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9895 AssertRCBreak(rc);
9896 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9897
9898 /*
9899 * Guest Non-Register State.
9900 */
9901 /* Activity State. */
9902 uint32_t u32ActivityState;
9903 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9904 AssertRCBreak(rc);
9905 HMVMX_CHECK_BREAK( !u32ActivityState
9906 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
9907 VMX_IGS_ACTIVITY_STATE_INVALID);
9908 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9909 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9910 uint32_t u32IntrState;
9911 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
9912 AssertRCBreak(rc);
9913 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
9914 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9915 {
9916 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9917 }
9918
9919 /** @todo Activity state and injecting interrupts. Left as a todo since we
9920         *        currently don't use any activity state other than ACTIVE. */
9921
9922 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9923 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9924
9925 /* Guest interruptibility-state. */
9926 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9927 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9928 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
9929 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
9930 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9931 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9932 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9933 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9934 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9935 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
9936 {
9937 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
9938 {
9939 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9940 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9941 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9942 }
9943 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9944 {
9945 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
9946 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9947 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
9948 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9949 }
9950 }
9951 /** @todo Assumes the processor is not in SMM. */
9952 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9953 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9954 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
9955 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
9956 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9957 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
9958 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
9959 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9960 {
9961 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
9962 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9963 }
9964
9965 /* Pending debug exceptions. */
9966 if (HMVMX_IS_64BIT_HOST_MODE())
9967 {
9968 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
9969 AssertRCBreak(rc);
9970 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9971 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9972 u32Val = u64Val; /* For pending debug exceptions checks below. */
9973 }
9974 else
9975 {
9976 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
9977 AssertRCBreak(rc);
9978 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9979 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
9980 }
9981
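/*
 * If interrupts are blocked by STI or MOV SS, or the guest is in the HLT activity state, the
 * pending debug exceptions' BS bit (bit 14) must be consistent with single-stepping: it must be
 * set when EFLAGS.TF is 1 and IA32_DEBUGCTL.BTF is 0, and it must be clear otherwise.
 */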
9982 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
9983 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
9984 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9985 {
9986 if ( (u32Eflags & X86_EFL_TF)
9987 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9988 {
9989 /* Bit 14 is PendingDebug.BS. */
9990 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9991 }
9992 if ( !(u32Eflags & X86_EFL_TF)
9993 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9994 {
9995 /* Bit 14 is PendingDebug.BS. */
9996 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9997 }
9998 }
9999
10000 /* VMCS link pointer. */
10001 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
10002 AssertRCBreak(rc);
10003 if (u64Val != UINT64_C(0xffffffffffffffff))
10004 {
10005 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
10006 /** @todo Bits beyond the processor's physical-address width MBZ. */
10007 /** @todo The 32-bit value located in memory referenced by this field (as a
10008 * physical address) must contain the processor's VMCS revision ID. */
10009 /** @todo SMM checks. */
10010 }
10011
10012 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when the guest is
10013 * not using Nested Paging? */
10014 if ( pVM->hm.s.fNestedPaging
10015 && !fLongModeGuest
10016 && CPUMIsGuestInPAEModeEx(pCtx))
10017 {
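/* With EPT and a PAE guest that is not in long mode, none of the four PDPTEs in the VMCS may have MBZ bits set. */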
10018 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
10019 AssertRCBreak(rc);
10020 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10021
10022 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
10023 AssertRCBreak(rc);
10024 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10025
10026 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
10027 AssertRCBreak(rc);
10028 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10029
10030 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
10031 AssertRCBreak(rc);
10032 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10033 }
10034
10035 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
10036 if (uError == VMX_IGS_ERROR)
10037 uError = VMX_IGS_REASON_NOT_FOUND;
10038 } while (0);
10039
10040 pVCpu->hm.s.u32HMError = uError;
10041 return uError;
10042
10043#undef HMVMX_ERROR_BREAK
10044#undef HMVMX_CHECK_BREAK
10045}
10046
10047/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10048/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
10049/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10050
10051/** @name VM-exit handlers.
10052 * @{
10053 */
10054
10055/**
10056 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
10057 */
10058HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10059{
10060 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10061 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
10062 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
10063 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
10064 return VINF_SUCCESS;
10065 return VINF_EM_RAW_INTERRUPT;
10066}
10067
10068
10069/**
10070 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
10071 */
10072HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10073{
10074 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10075 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
10076
10077 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
10078 AssertRCReturn(rc, rc);
10079
10080 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
10081 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
10082 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
10083 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
10084
10085 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10086 {
10087 /*
10088 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and
10089 * anything we inject is not going to cause a VM-exit directly for the event being injected.
10090 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
10091 *
10092 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
10093 */
10094 VMXDispatchHostNmi();
10095 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
10096 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10097 return VINF_SUCCESS;
10098 }
10099
10100 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
10101 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
10102 if (RT_UNLIKELY(rc != VINF_SUCCESS))
10103 {
10104 if (rc == VINF_HM_DOUBLE_FAULT)
10105 rc = VINF_SUCCESS;
10106 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10107 return rc;
10108 }
10109
10110 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
10111 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
10112 switch (uIntType)
10113 {
10114 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
10115 Assert(uVector == X86_XCPT_DB);
10116 /* no break */
10117 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
10118 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
10119 /* no break */
10120 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
10121 {
10122 switch (uVector)
10123 {
10124 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
10125 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
10126 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
10127 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
10128 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
10129 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
10130#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
10131 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
10132 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10133 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
10134 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10135 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
10136 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10137 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
10138 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10139 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
10140 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10141 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
10142 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
10143#endif
10144 default:
10145 {
10146 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10147 AssertRCReturn(rc, rc);
10148
10149 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
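/* In real-on-v86 mode (real mode emulated via virtual-8086 mode) we simply re-inject the exception
   into the guest as a pending event; outside that mode such an exception is genuinely unexpected. */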
10150 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10151 {
10152 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
10153 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
10154 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
10155
10156 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10157 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
10158 AssertRCReturn(rc, rc);
10159 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
10160 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
10161 0 /* GCPtrFaultAddress */);
10162 AssertRCReturn(rc, rc);
10163 }
10164 else
10165 {
10166 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
10167 pVCpu->hm.s.u32HMError = uVector;
10168 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
10169 }
10170 break;
10171 }
10172 }
10173 break;
10174 }
10175
10176 default:
10177 {
10178 pVCpu->hm.s.u32HMError = uExitIntInfo;
10179 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
10180 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
10181 break;
10182 }
10183 }
10184 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
10185 return rc;
10186}
10187
10188
10189/**
10190 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
10191 */
10192HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10193{
10194 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10195
10196 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
10197 hmR0VmxClearIntWindowExitVmcs(pVCpu);
10198
10199 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
10200 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
10201 return VINF_SUCCESS;
10202}
10203
10204
10205/**
10206 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
10207 */
10208HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10209{
10210 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10211 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
10212 {
10213 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
10214 HMVMX_RETURN_UNEXPECTED_EXIT();
10215 }
10216
10217 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
10218
10219 /*
10220 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
10221 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
10222 */
10223 uint32_t uIntrState = 0;
10224 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10225 AssertRCReturn(rc, rc);
10226
10227 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
10228 if ( fBlockSti
10229 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
10230 {
10231 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
10232 }
10233
10234 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
10235 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
10236
10237 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
10238 return VINF_SUCCESS;
10239}
10240
10241
10242/**
10243 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
10244 */
10245HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10246{
10247 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10248 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
10249 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10250}
10251
10252
10253/**
10254 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
10255 */
10256HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10257{
10258 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10259 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
10260 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10261}
10262
10263
10264/**
10265 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
10266 */
10267HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10268{
10269 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10270 PVM pVM = pVCpu->CTX_SUFF(pVM);
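/* CPUID causes an unconditional VM-exit; have EM interpret it and then skip the 2-byte opcode (0F A2). */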
10271 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10272 if (RT_LIKELY(rc == VINF_SUCCESS))
10273 {
10274 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10275 Assert(pVmxTransient->cbInstr == 2);
10276 }
10277 else
10278 {
10279 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
10280 rc = VERR_EM_INTERPRETER;
10281 }
10282 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
10283 return rc;
10284}
10285
10286
10287/**
10288 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
10289 */
10290HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10291{
10292 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10293 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10294 AssertRCReturn(rc, rc);
10295
10296 if (pMixedCtx->cr4 & X86_CR4_SMXE)
10297 return VINF_EM_RAW_EMULATE_INSTR;
10298
10299 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
10300 HMVMX_RETURN_UNEXPECTED_EXIT();
10301}
10302
10303
10304/**
10305 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
10306 */
10307HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10308{
10309 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10310 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10311 AssertRCReturn(rc, rc);
10312
10313 PVM pVM = pVCpu->CTX_SUFF(pVM);
10314 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10315 if (RT_LIKELY(rc == VINF_SUCCESS))
10316 {
10317 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10318 Assert(pVmxTransient->cbInstr == 2);
10319 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10320 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10321 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10322 }
10323 else
10324 rc = VERR_EM_INTERPRETER;
10325 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10326 return rc;
10327}
10328
10329
10330/**
10331 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
10332 */
10333HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10334{
10335 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10336 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10337 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
10338 AssertRCReturn(rc, rc);
10339
10340 PVM pVM = pVCpu->CTX_SUFF(pVM);
10341 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
10342 if (RT_LIKELY(rc == VINF_SUCCESS))
10343 {
10344 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10345 Assert(pVmxTransient->cbInstr == 3);
10346 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
10347 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
10348 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10349 }
10350 else
10351 {
10352 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
10353 rc = VERR_EM_INTERPRETER;
10354 }
10355 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
10356 return rc;
10357}
10358
10359
10360/**
10361 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
10362 */
10363HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10364{
10365 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10366 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
10367 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
10368 AssertRCReturn(rc, rc);
10369
10370 PVM pVM = pVCpu->CTX_SUFF(pVM);
10371 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10372 if (RT_LIKELY(rc == VINF_SUCCESS))
10373 {
10374 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10375 Assert(pVmxTransient->cbInstr == 2);
10376 }
10377 else
10378 {
10379 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
10380 rc = VERR_EM_INTERPRETER;
10381 }
10382 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
10383 return rc;
10384}
10385
10386
10387/**
10388 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
10389 */
10390HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10391{
10392 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10393 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
10394
10395 if (pVCpu->hm.s.fHypercallsEnabled)
10396 {
10397#if 0
10398 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10399 AssertRCReturn(rc, rc);
10400#else
10401 /* Aggressive state sync. for now. */
10402 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10403 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). */
10404#endif
10405 AssertRCReturn(rc, rc);
10406
10407 rc = GIMHypercall(pVCpu, pMixedCtx);
10408 if (RT_SUCCESS(rc))
10409 {
10410 /* If the hypercall changes anything other than guest general-purpose registers,
10411 we would need to reload the guest changed bits here before VM-reentry. */
10412 hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10413 return VINF_SUCCESS;
10414 }
10415 }
10416
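/* Hypercalls are not enabled for this guest or the hypercall failed; raise #UD in the guest. */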
10417 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10418 return VINF_SUCCESS;
10419}
10420
10421
10422/**
10423 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
10424 */
10425HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10426{
10427 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10428 PVM pVM = pVCpu->CTX_SUFF(pVM);
10429 Assert(!pVM->hm.s.fNestedPaging);
10430
10431 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10432 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
10433 AssertRCReturn(rc, rc);
10434
10435 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
10436 rc = VBOXSTRICTRC_VAL(rc2);
10437 if (RT_LIKELY(rc == VINF_SUCCESS))
10438 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10439 else
10440 {
10441 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
10442 pVmxTransient->uExitQualification, rc));
10443 }
10444 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
10445 return rc;
10446}
10447
10448
10449/**
10450 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
10451 */
10452HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10453{
10454 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10455 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10456 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10457 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10458 AssertRCReturn(rc, rc);
10459
10460 PVM pVM = pVCpu->CTX_SUFF(pVM);
10461 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10462 if (RT_LIKELY(rc == VINF_SUCCESS))
10463 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10464 else
10465 {
10466 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
10467 rc = VERR_EM_INTERPRETER;
10468 }
10469 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
10470 return rc;
10471}
10472
10473
10474/**
10475 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
10476 */
10477HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10478{
10479 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10480 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10481 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10482 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10483 AssertRCReturn(rc, rc);
10484
10485 PVM pVM = pVCpu->CTX_SUFF(pVM);
10486 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10487 rc = VBOXSTRICTRC_VAL(rc2);
10488 if (RT_LIKELY( rc == VINF_SUCCESS
10489 || rc == VINF_EM_HALT))
10490 {
10491 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10492 AssertRCReturn(rc3, rc3);
10493
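/* If MWAIT halted the CPU but EM determines the wait need not be honoured (e.g. a wake-up event
   is already pending), continue guest execution instead of going to ring-3 to halt. */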
10494 if ( rc == VINF_EM_HALT
10495 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
10496 {
10497 rc = VINF_SUCCESS;
10498 }
10499 }
10500 else
10501 {
10502 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
10503 rc = VERR_EM_INTERPRETER;
10504 }
10505 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
10506 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
10507 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
10508 return rc;
10509}
10510
10511
10512/**
10513 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
10514 */
10515HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10516{
10517 /*
10518 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
10519 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
10520 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
10521 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
10522 */
10523 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10524 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10525 HMVMX_RETURN_UNEXPECTED_EXIT();
10526}
10527
10528
10529/**
10530 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
10531 */
10532HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10533{
10534 /*
10535 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
10536 * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL
10537 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
10538 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
10539 */
10540 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10541 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10542 HMVMX_RETURN_UNEXPECTED_EXIT();
10543}
10544
10545
10546/**
10547 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
10548 */
10549HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10550{
10551 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
10552 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10553 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10554 HMVMX_RETURN_UNEXPECTED_EXIT();
10555}
10556
10557
10558/**
10559 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
10560 */
10561HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10562{
10563 /*
10564 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
10565 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
10566 * See Intel spec. 25.3 "Other Causes of VM-exits".
10567 */
10568 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10569 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10570 HMVMX_RETURN_UNEXPECTED_EXIT();
10571}
10572
10573
10574/**
10575 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
10576 * VM-exit.
10577 */
10578HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10579{
10580 /*
10581 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
10582 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
10583 *
10584 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
10585 * See Intel spec. "23.8 Restrictions on VMX operation".
10586 */
10587 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10588 return VINF_SUCCESS;
10589}
10590
10591
10592/**
10593 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
10594 * VM-exit.
10595 */
10596HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10597{
10598 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10599 return VINF_EM_RESET;
10600}
10601
10602
10603/**
10604 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
10605 */
10606HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10607{
10608 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10609 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
10610 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10611 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10612 AssertRCReturn(rc, rc);
10613
10614 pMixedCtx->rip++;
10615 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10616 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
10617 rc = VINF_SUCCESS;
10618 else
10619 rc = VINF_EM_HALT;
10620
10621 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10622 if (rc != VINF_SUCCESS)
10623 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
10624 return rc;
10625}
10626
10627
10628/**
10629 * VM-exit handler for instructions that result in a #UD exception delivered to
10630 * the guest.
10631 */
10632HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10633{
10634 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10635 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
10636 return VINF_SUCCESS;
10637}
10638
10639
10640/**
10641 * VM-exit handler for expiry of the VMX preemption timer.
10642 */
10643HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10644{
10645 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10646
10647 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
10648 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10649
10650 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
10651 PVM pVM = pVCpu->CTX_SUFF(pVM);
10652 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
10653 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
10654 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
10655}
10656
10657
10658/**
10659 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
10660 */
10661HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10662{
10663 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10664
10665 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10666 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
10667 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
10668 AssertRCReturn(rc, rc);
10669
10670 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
10671 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
10672
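/* Re-evaluate whether XCR0 must be swapped on VM-entry/exit: only when the guest has CR4.OSXSAVE set and its XCR0 differs from the host's. */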
10673 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
10674
10675 return VBOXSTRICTRC_TODO(rcStrict);
10676}
10677
10678
10679/**
10680 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
10681 */
10682HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10683{
10684 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10685
10686 /* The guest should not invalidate the host CPU's TLBs; fall back to the interpreter. */
10687 /** @todo implement EMInterpretInvpcid() */
10688 return VERR_EM_INTERPRETER;
10689}
10690
10691
10692/**
10693 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
10694 * Error VM-exit.
10695 */
10696HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10697{
10698 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10699 AssertRCReturn(rc, rc);
10700
10701 rc = hmR0VmxCheckVmcsCtls(pVCpu);
10702 AssertRCReturn(rc, rc);
10703
10704 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10705 NOREF(uInvalidReason);
10706
10707#ifdef VBOX_STRICT
10708 uint32_t uIntrState;
10709 HMVMXHCUINTREG uHCReg;
10710 uint64_t u64Val;
10711 uint32_t u32Val;
10712
10713 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
10714 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
10715 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
10716 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
10717 AssertRCReturn(rc, rc);
10718
10719 Log4(("uInvalidReason %u\n", uInvalidReason));
10720 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
10721 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
10722 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
10723 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
10724
10725 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
10726 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
10727 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
10728 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
10729 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
10730 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
10731 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
10732 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
10733 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
10734 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
10735 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
10736 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
10737#else
10738 NOREF(pVmxTransient);
10739#endif
10740
10741 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
10742 return VERR_VMX_INVALID_GUEST_STATE;
10743}
10744
10745
10746/**
10747 * VM-exit handler for VM-entry failure due to an MSR-load
10748 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
10749 */
10750HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10751{
10752 NOREF(pVmxTransient);
10753 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10754 HMVMX_RETURN_UNEXPECTED_EXIT();
10755}
10756
10757
10758/**
10759 * VM-exit handler for VM-entry failure due to a machine-check event
10760 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
10761 */
10762HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10763{
10764 NOREF(pVmxTransient);
10765 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
10766 HMVMX_RETURN_UNEXPECTED_EXIT();
10767}
10768
10769
10770/**
10771 * VM-exit handler for all undefined reasons. Should never ever happen... in
10772 * theory.
10773 */
10774HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10775{
10776 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
10777 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
10778 return VERR_VMX_UNDEFINED_EXIT_CODE;
10779}
10780
10781
10782/**
10783 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
10784 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
10785 * Conditional VM-exit.
10786 */
10787HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10788{
10789 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10790
10791 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
10792 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
10793 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
10794 return VERR_EM_INTERPRETER;
10795 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10796 HMVMX_RETURN_UNEXPECTED_EXIT();
10797}
10798
10799
10800/**
10801 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
10802 */
10803HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10804{
10805 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10806
10807 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
10808 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
10809 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
10810 return VERR_EM_INTERPRETER;
10811 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
10812 HMVMX_RETURN_UNEXPECTED_EXIT();
10813}
10814
10815
10816/**
10817 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
10818 */
10819HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10820{
10821 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10822
10823 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
10824 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10825 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10826 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10827 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10828 {
10829 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10830 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10831 }
10832 AssertRCReturn(rc, rc);
10833 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
10834
10835#ifdef VBOX_STRICT
10836 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
10837 {
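/* Paranoia: with MSR bitmaps enabled, RDMSR of an MSR in the auto-load/store area (other than EFER)
   or of a pass-through lazy-restore MSR should never have caused this VM-exit. */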
10838 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
10839 && pMixedCtx->ecx != MSR_K6_EFER)
10840 {
10841 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10842 HMVMX_RETURN_UNEXPECTED_EXIT();
10843 }
10844# if HC_ARCH_BITS == 64
10845 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
10846 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10847 {
10848 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10849 HMVMX_RETURN_UNEXPECTED_EXIT();
10850 }
10851# endif
10852 }
10853#endif
10854
10855 PVM pVM = pVCpu->CTX_SUFF(pVM);
10856 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10857 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
10858 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
10859 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
10860 if (RT_LIKELY(rc == VINF_SUCCESS))
10861 {
10862 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10863 Assert(pVmxTransient->cbInstr == 2);
10864 }
10865 return rc;
10866}
10867
10868
10869/**
10870 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
10871 */
10872HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10873{
10874 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10875 PVM pVM = pVCpu->CTX_SUFF(pVM);
10876 int rc = VINF_SUCCESS;
10877
10878 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
10879 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10880 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10881 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
10882 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10883 {
10884 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
10885 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
10886 }
10887 AssertRCReturn(rc, rc);
10888 Log4(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
10889
10890 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10891 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
10892 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
10893
10894 if (RT_LIKELY(rc == VINF_SUCCESS))
10895 {
10896 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
10897
10898 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
10899 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
10900 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
10901 {
10902 /* We've already saved the APIC-related guest state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
10903 * virtualization is implemented, we'll have to make sure the APIC state is saved from the VMCS before
10904 * EMInterpretWrmsr() changes it. */
10905 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
10906 }
10907 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
10908 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
10909 else if (pMixedCtx->ecx == MSR_K6_EFER)
10910 {
10911 /*
10912 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
10913 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
10914 * the other bits as well, SCE and NXE. See @bugref{7368}.
10915 */
10916 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
10917 }
10918
10919 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
10920 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
10921 {
10922 switch (pMixedCtx->ecx)
10923 {
10924 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
10925 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
10926 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
10927 case MSR_K8_FS_BASE: /* no break */
10928 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
10929 case MSR_K6_EFER: /* already handled above */ break;
10930 default:
10931 {
10932 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10933 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
10934#if HC_ARCH_BITS == 64
10935 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10936 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
10937#endif
10938 break;
10939 }
10940 }
10941 }
10942#ifdef VBOX_STRICT
10943 else
10944 {
10945 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
10946 switch (pMixedCtx->ecx)
10947 {
10948 case MSR_IA32_SYSENTER_CS:
10949 case MSR_IA32_SYSENTER_EIP:
10950 case MSR_IA32_SYSENTER_ESP:
10951 case MSR_K8_FS_BASE:
10952 case MSR_K8_GS_BASE:
10953 {
10954 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
10955 HMVMX_RETURN_UNEXPECTED_EXIT();
10956 }
10957
10958 /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
10959 default:
10960 {
10961 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
10962 {
10963 /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
10964 if (pMixedCtx->ecx != MSR_K6_EFER)
10965 {
10966 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
10967 pMixedCtx->ecx));
10968 HMVMX_RETURN_UNEXPECTED_EXIT();
10969 }
10970 }
10971
10972#if HC_ARCH_BITS == 64
10973 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
10974 {
10975 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
10976 HMVMX_RETURN_UNEXPECTED_EXIT();
10977 }
10978#endif
10979 break;
10980 }
10981 }
10982 }
10983#endif /* VBOX_STRICT */
10984 }
10985 return rc;
10986}
10987
10988
10989/**
10990 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
10991 */
10992HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10993{
10994 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10995
10996 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
10997 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
10998 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
10999 return VERR_EM_INTERPRETER;
11000 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11001 HMVMX_RETURN_UNEXPECTED_EXIT();
11002}
11003
11004
11005/**
11006 * VM-exit handler for when the TPR value is lowered below the specified
11007 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
11008 */
11009HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11010{
11011 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11012 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
11013
11014 /*
11015 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
11016 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
11017 * resume guest execution.
11018 */
11019 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11020 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
11021 return VINF_SUCCESS;
11022}
11023
11024
11025/**
11026 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
11027 * VM-exit.
11028 *
11029 * @retval VINF_SUCCESS when guest execution can continue.
11030 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
11031 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
11032 * @retval VERR_EM_INTERPRETER when something unexpected happened; fall back to the
11033 * interpreter.
11034 */
11035HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11036{
11037 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11038 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
11039 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11040 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11041 AssertRCReturn(rc, rc);
11042
11043 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
11044 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
11045 PVM pVM = pVCpu->CTX_SUFF(pVM);
11046 VBOXSTRICTRC rcStrict;
11047 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
11048 switch (uAccessType)
11049 {
11050 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
11051 {
11052 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11053 AssertRCReturn(rc, rc);
11054
11055 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
11056 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
11057 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
11058 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE
11059 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11060 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
11061 {
11062 case 0: /* CR0 */
11063 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11064 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
11065 break;
11066 case 2: /* CR2 */
11067 /* Nothing to do here; CR2 is not part of the VMCS. */
11068 break;
11069 case 3: /* CR3 */
11070 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
11071 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
11072 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
11073 break;
11074 case 4: /* CR4 */
11075 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
11076 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n",
11077 VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
11078 break;
11079 case 8: /* CR8 */
11080 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
11081 /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */
11082 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11083 break;
11084 default:
11085 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
11086 break;
11087 }
11088
11089 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
11090 break;
11091 }
11092
11093 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
11094 {
11095 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11096 AssertRCReturn(rc, rc);
11097
11098 Assert( !pVM->hm.s.fNestedPaging
11099 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
11100 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
11101
11102 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
11103 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
11104 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
11105
11106 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
11107 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
11108 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
11109 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11110 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
11111 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
11112 VBOXSTRICTRC_VAL(rcStrict)));
11113 break;
11114 }
11115
11116 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
11117 {
11118 AssertRCReturn(rc, rc);
11119 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
11120 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11121 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11122 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
11123 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
11124 break;
11125 }
11126
11127 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
11128 {
11129 AssertRCReturn(rc, rc);
11130 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
11131 VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
11132 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11133 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
11134 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
11135 break;
11136 }
11137
11138 default:
11139 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
11140 VERR_VMX_UNEXPECTED_EXCEPTION);
11141 }
11142
11143 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
11144 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
11145 return VBOXSTRICTRC_TODO(rcStrict);
11146}
11147
11148
11149/**
11150 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
11151 * VM-exit.
11152 */
11153HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11154{
11155 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11156 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
11157
11158 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11159 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11160 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
11161 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
11162 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
11163 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
11164 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
11165 AssertRCReturn(rc2, rc2);
11166
11167 /* Refer to Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
11168 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
11169 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
11170 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
11171 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
11172 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
11173 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
11174 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
11175
11176 /* I/O operation lookup arrays. */
11177 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
11178 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
11179
11180 VBOXSTRICTRC rcStrict;
11181 uint32_t const cbValue = s_aIOSizes[uIOWidth];
11182 uint32_t const cbInstr = pVmxTransient->cbInstr;
11183 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
11184 PVM pVM = pVCpu->CTX_SUFF(pVM);
11185 if (fIOString)
11186 {
11187 #ifdef VBOX_WITH_2ND_IEM_STEP /* This used to cause Guru Meditations with a debian 32-bit guest without NP (on ATA reads). See @bugref{5752#c158}. Should work now. */
11188 /*
11189 * INS/OUTS - I/O String instruction.
11190 *
11191 * Use instruction-information if available, otherwise fall back on
11192 * interpreting the instruction.
11193 */
11194 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
11195 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
11196 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
11197 {
11198 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
11199 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
11200 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11201 AssertRCReturn(rc2, rc2);
11202 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
11203 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
11204 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
11205 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
11206 if (fIOWrite)
11207 {
11208 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
11209 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
11210 }
11211 else
11212 {
11213 /*
11214 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
11215 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
11216 * See Intel Instruction spec. for "INS".
11217 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
11218 */
11219 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
11220 }
11221 }
11222 else
11223 {
11224 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
11225 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11226 AssertRCReturn(rc2, rc2);
11227 rcStrict = IEMExecOne(pVCpu);
11228 }
11229 /** @todo IEM needs to be setting these flags somehow. */
11230 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11231 fUpdateRipAlready = true;
11232#else
11233 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11234 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
11235 if (RT_SUCCESS(rcStrict))
11236 {
11237 if (fIOWrite)
11238 {
11239 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11240 (DISCPUMODE)pDis->uAddrMode, cbValue);
11241 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
11242 }
11243 else
11244 {
11245 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
11246 (DISCPUMODE)pDis->uAddrMode, cbValue);
11247 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
11248 }
11249 }
11250 else
11251 {
11252 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
11253 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
11254 }
11255#endif
11256 }
11257 else
11258 {
11259 /*
11260 * IN/OUT - I/O instruction.
11261 */
11262 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
11263 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
11264 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
11265 if (fIOWrite)
11266 {
11267 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
11268 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11269 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11270 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
11271 }
11272 else
11273 {
11274 uint32_t u32Result = 0;
11275 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
11276 if (IOM_SUCCESS(rcStrict))
11277 {
11278 /* Save result of I/O IN instr. in AL/AX/EAX. */
11279 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
11280 }
11281 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11282 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
11283 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
11284 }
11285 }
11286
11287 if (IOM_SUCCESS(rcStrict))
11288 {
11289 if (!fUpdateRipAlready)
11290 {
11291 pMixedCtx->rip += cbInstr;
11292 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11293 }
11294
11295 /*
11296 * INS/OUTS with a REP prefix updates RFLAGS; this could be observed as a triple-fault Guru Meditation while booting a Fedora 17 64-bit guest.
11297 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
11298 */
11299 if (fIOString)
11300 {
11301 /** @todo Single-step for INS/OUTS with REP prefix? */
11302 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
11303 }
11304 else if (fStepping)
11305 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
11306
11307 /*
11308 * If any I/O breakpoints are armed, we need to check if one triggered
11309 * and take appropriate action.
11310 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
11311 */
11312 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11313 AssertRCReturn(rc2, rc2);
11314
11315 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
11316 * execution engines about whether hyper BPs and such are pending. */
11317 uint32_t const uDr7 = pMixedCtx->dr[7];
11318 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
11319 && X86_DR7_ANY_RW_IO(uDr7)
11320 && (pMixedCtx->cr4 & X86_CR4_DE))
11321 || DBGFBpIsHwIoArmed(pVM)))
11322 {
11323 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
11324
11325 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
11326 VMMRZCallRing3Disable(pVCpu);
11327 HM_DISABLE_PREEMPT();
11328
11329 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
11330
11331 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
11332 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
11333 {
11334 /* Raise #DB. */
11335 if (fIsGuestDbgActive)
11336 ASMSetDR6(pMixedCtx->dr[6]);
11337 if (pMixedCtx->dr[7] != uDr7)
11338 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11339
11340 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
11341 }
11342 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
11343 else if ( rcStrict2 != VINF_SUCCESS
11344 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
11345 rcStrict = rcStrict2;
11346
11347 HM_RESTORE_PREEMPT();
11348 VMMRZCallRing3Enable(pVCpu);
11349 }
11350 }
11351
11352#ifdef DEBUG
11353 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
11354 Assert(!fIOWrite);
11355 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
11356 Assert(fIOWrite);
11357 else
11358 {
11359 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
11360 * statuses, that the VMM device and some others may return. See
11361 * IOM_SUCCESS() for guidance. */
11362 AssertMsg( RT_FAILURE(rcStrict)
11363 || rcStrict == VINF_SUCCESS
11364 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
11365 || rcStrict == VINF_EM_DBG_BREAKPOINT
11366 || rcStrict == VINF_EM_RAW_GUEST_TRAP
11367 || rcStrict == VINF_EM_RAW_TO_R3
11368 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11369 }
11370#endif
11371
11372 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
11373 return VBOXSTRICTRC_TODO(rcStrict);
11374}
11375
11376
11377/**
11378 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
11379 * VM-exit.
11380 */
11381HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11382{
11383 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11384
11385     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
11386 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11387 AssertRCReturn(rc, rc);
11388 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
11389 {
11390 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
11391 AssertRCReturn(rc, rc);
11392 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
11393 {
11394 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
11395
11396 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
11397 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
11398
11399 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
11400 Assert(!pVCpu->hm.s.Event.fPending);
11401 pVCpu->hm.s.Event.fPending = true;
11402 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
11403 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
11404 AssertRCReturn(rc, rc);
11405 if (fErrorCodeValid)
11406 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
11407 else
11408 pVCpu->hm.s.Event.u32ErrCode = 0;
11409 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
11410 && uVector == X86_XCPT_PF)
11411 {
11412 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
11413 }
11414
11415 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
11416 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11417 return VINF_EM_RAW_INJECT_TRPM_EVENT;
11418 }
11419 }
11420
11421 /** @todo Emulate task switch someday, currently just going back to ring-3 for
11422 * emulation. */
11423 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
11424 return VERR_EM_INTERPRETER;
11425}
11426
11427
11428/**
11429 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
11430 */
11431HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11432{
11433 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
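    /* We arm the monitor-trap-flag for single-stepping the guest; disarm it again and report the completed step
       to the debugger (VINF_EM_DBG_STEPPED). */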
11434 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
11435 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
11436 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11437 AssertRCReturn(rc, rc);
11438 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
11439 return VINF_EM_DBG_STEPPED;
11440}
11441
11442
11443/**
11444 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
11445 */
11446HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11447{
11448 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11449
11450 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11451 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11452 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11453 {
11454 if (rc == VINF_HM_DOUBLE_FAULT)
11455 rc = VINF_SUCCESS;
11456 return rc;
11457 }
11458
11459#if 0
11460 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
11461 * just sync the whole thing. */
11462 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11463#else
11464 /* Aggressive state sync. for now. */
11465 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11466 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11467 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11468#endif
11469 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11470 AssertRCReturn(rc, rc);
11471
11472     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
11473 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
11474 switch (uAccessType)
11475 {
11476 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
11477 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
11478 {
11479 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
11480 || VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != 0x80,
11481 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
11482
11483 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
11484 GCPhys &= PAGE_BASE_GC_MASK;
11485 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
11486 PVM pVM = pVCpu->CTX_SUFF(pVM);
11487             Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
11488 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
11489
11490 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
11491 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
11492 CPUMCTX2CORE(pMixedCtx), GCPhys);
11493 rc = VBOXSTRICTRC_VAL(rc2);
11494 Log4(("ApicAccess rc=%d\n", rc));
11495 if ( rc == VINF_SUCCESS
11496 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11497 || rc == VERR_PAGE_NOT_PRESENT)
11498 {
11499 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11500 | HM_CHANGED_GUEST_RSP
11501 | HM_CHANGED_GUEST_RFLAGS
11502 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11503 rc = VINF_SUCCESS;
11504 }
11505 break;
11506 }
11507
11508 default:
11509 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
11510 rc = VINF_EM_RAW_EMULATE_INSTR;
11511 break;
11512 }
11513
11514 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
11515 if (rc != VINF_SUCCESS)
11516 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
11517 return rc;
11518}
11519
11520
11521/**
11522 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
11523 * VM-exit.
11524 */
11525HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11526{
11527 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11528
11529 /* We should -not- get this VM-exit if the guest's debug registers were active. */
11530 if (pVmxTransient->fWasGuestDebugStateActive)
11531 {
11532 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11533 HMVMX_RETURN_UNEXPECTED_EXIT();
11534 }
11535
11536 int rc = VERR_INTERNAL_ERROR_5;
11537 if ( !DBGFIsStepping(pVCpu)
11538 && !pVCpu->hm.s.fSingleInstruction
11539 && !pVmxTransient->fWasHyperDebugStateActive)
11540 {
11541 /* Don't intercept MOV DRx and #DB any more. */
11542 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
11543 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
11544 AssertRCReturn(rc, rc);
11545
11546 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11547 {
11548#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11549 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
11550 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
11551#endif
11552 }
11553
11554 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11555 VMMRZCallRing3Disable(pVCpu);
11556 HM_DISABLE_PREEMPT();
11557
11558 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
11559 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
11560 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
11561
11562 HM_RESTORE_PREEMPT();
11563 VMMRZCallRing3Enable(pVCpu);
11564
11565#ifdef VBOX_WITH_STATISTICS
11566 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11567 AssertRCReturn(rc, rc);
11568 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11569 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11570 else
11571 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11572#endif
11573 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
11574 return VINF_SUCCESS;
11575 }
11576
11577 /*
11578 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
11579 * Update the segment registers and DR7 from the CPU.
11580 */
11581 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11582 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11583 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11584 AssertRCReturn(rc, rc);
11585 Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
11586
11587 PVM pVM = pVCpu->CTX_SUFF(pVM);
11588 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
11589 {
11590 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11591 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
11592 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
11593 if (RT_SUCCESS(rc))
11594 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
11595 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
11596 }
11597 else
11598 {
11599 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
11600 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
11601 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
11602 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
11603 }
11604
11605 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
11606 if (RT_SUCCESS(rc))
11607 {
11608 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11609 AssertRCReturn(rc2, rc2);
11610 }
11611 return rc;
11612}
11613
11614
11615/**
11616 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
11617 * Conditional VM-exit.
11618 */
11619HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11620{
11621 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11622 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11623
11624 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11625 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11626 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11627 {
11628 if (rc == VINF_HM_DOUBLE_FAULT)
11629 rc = VINF_SUCCESS;
11630 return rc;
11631 }
11632
11633 RTGCPHYS GCPhys = 0;
11634 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11635
11636#if 0
11637 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11638#else
11639 /* Aggressive state sync. for now. */
11640 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11641 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11642 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11643#endif
11644 AssertRCReturn(rc, rc);
11645
11646 /*
11647 * If we succeed, resume guest execution.
11648     * If we fail to interpret the instruction because we couldn't get the guest physical address
11649     * of the page containing the instruction via the guest's page tables (we would have invalidated the
11650     * guest page in the host TLB), resume execution, which will cause a guest page fault to let the guest
11651     * handle this weird case. See @bugref{6043}.
11652 */
11653 PVM pVM = pVCpu->CTX_SUFF(pVM);
11654 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
11655 rc = VBOXSTRICTRC_VAL(rc2);
11656     Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
11657 if ( rc == VINF_SUCCESS
11658 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11659 || rc == VERR_PAGE_NOT_PRESENT)
11660 {
11661 /* Successfully handled MMIO operation. */
11662 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11663 | HM_CHANGED_GUEST_RSP
11664 | HM_CHANGED_GUEST_RFLAGS
11665 | HM_CHANGED_VMX_GUEST_APIC_STATE);
11666 rc = VINF_SUCCESS;
11667 }
11668 return rc;
11669}
11670
11671
11672/**
11673 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
11674 * VM-exit.
11675 */
11676HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11677{
11678 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11679 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
11680
11681 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11682 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11683 if (RT_UNLIKELY(rc != VINF_SUCCESS))
11684 {
11685 if (rc == VINF_HM_DOUBLE_FAULT)
11686 rc = VINF_SUCCESS;
11687 return rc;
11688 }
11689
11690 RTGCPHYS GCPhys = 0;
11691 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
11692 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11693#if 0
11694 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
11695#else
11696 /* Aggressive state sync. for now. */
11697 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
11698 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11699 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11700#endif
11701 AssertRCReturn(rc, rc);
11702
11703 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
11704 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
11705
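    /* Translate the EPT-violation exit qualification bits into x86 #PF error-code bits for PGM:
       instruction fetch -> ID, data write -> RW, EPT entry present -> P. */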
11706 RTGCUINT uErrorCode = 0;
11707 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
11708 uErrorCode |= X86_TRAP_PF_ID;
11709 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
11710 uErrorCode |= X86_TRAP_PF_RW;
11711 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
11712 uErrorCode |= X86_TRAP_PF_P;
11713
11714 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
11715
11716     Log4(("EPT violation %#RX64 at %#RGp ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
11717 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
11718
11719 /* Handle the pagefault trap for the nested shadow table. */
11720 PVM pVM = pVCpu->CTX_SUFF(pVM);
11721 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
11722 TRPMResetTrap(pVCpu);
11723
11724 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
11725 if ( rc == VINF_SUCCESS
11726 || rc == VERR_PAGE_TABLE_NOT_PRESENT
11727 || rc == VERR_PAGE_NOT_PRESENT)
11728 {
11729 /* Successfully synced our nested page tables. */
11730 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
11731 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
11732 | HM_CHANGED_GUEST_RSP
11733 | HM_CHANGED_GUEST_RFLAGS);
11734 return VINF_SUCCESS;
11735 }
11736
11737 Log4(("EPT return to ring-3 rc=%Rrc\n", rc));
11738 return rc;
11739}
11740
11741/** @} */
11742
11743/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11744/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
11745/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11746
11747/** @name VM-exit exception handlers.
11748 * @{
11749 */
11750
11751/**
11752 * VM-exit exception handler for #MF (Math Fault: floating point exception).
11753 */
11754static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11755{
11756 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11757 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
11758
11759 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11760 AssertRCReturn(rc, rc);
11761
11762 if (!(pMixedCtx->cr0 & X86_CR0_NE))
11763 {
11764 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
11765 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
11766
11767 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
11768      *        provides the VM-exit instruction length. If this causes problems later,
11769 * disassemble the instruction like it's done on AMD-V. */
11770 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11771 AssertRCReturn(rc2, rc2);
11772 return rc;
11773 }
11774
11775 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11776 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11777 return rc;
11778}
11779
11780
11781/**
11782 * VM-exit exception handler for #BP (Breakpoint exception).
11783 */
11784static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11785{
11786 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11787 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
11788
11789 /** @todo Try optimize this by not saving the entire guest state unless
11790 * really needed. */
11791 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11792 AssertRCReturn(rc, rc);
11793
11794 PVM pVM = pVCpu->CTX_SUFF(pVM);
11795 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11796 if (rc == VINF_EM_RAW_GUEST_TRAP)
11797 {
11798 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11799 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11800 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11801 AssertRCReturn(rc, rc);
11802
11803 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11804 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11805 }
11806
11807 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
11808 return rc;
11809}
11810
11811
11812/**
11813 * VM-exit exception handler for #DB (Debug exception).
11814 */
11815static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11816{
11817 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11818 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
11819 Log6(("XcptDB\n"));
11820
11821 /*
11822     * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
11823 * for processing.
11824 */
11825 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11826 AssertRCReturn(rc, rc);
11827
11828     /* See Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
11829 uint64_t uDR6 = X86_DR6_INIT_VAL;
11830 uDR6 |= ( pVmxTransient->uExitQualification
11831 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
11832
11833 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
11834 if (rc == VINF_EM_RAW_GUEST_TRAP)
11835 {
11836 /*
11837 * The exception was for the guest. Update DR6, DR7.GD and
11838 * IA32_DEBUGCTL.LBR before forwarding it.
11839 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
11840 */
11841 VMMRZCallRing3Disable(pVCpu);
11842 HM_DISABLE_PREEMPT();
11843
11844 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
11845 pMixedCtx->dr[6] |= uDR6;
11846 if (CPUMIsGuestDebugStateActive(pVCpu))
11847 ASMSetDR6(pMixedCtx->dr[6]);
11848
11849 HM_RESTORE_PREEMPT();
11850 VMMRZCallRing3Enable(pVCpu);
11851
11852 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
11853 AssertRCReturn(rc, rc);
11854
11855 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
11856 pMixedCtx->dr[7] &= ~X86_DR7_GD;
11857
11858 /* Paranoia. */
11859 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
11860 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
11861
11862 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
11863 AssertRCReturn(rc, rc);
11864
11865 /*
11866 * Raise #DB in the guest.
11867 *
11868 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
11869 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP and not the 'normal' #DB.
11870 * Thus it -may- trigger different handling in the CPU (like skipped DPL checks). See @bugref{6398}.
11871 *
11872 * Since ICEBP isn't documented on Intel, see AMD spec. 15.20 "Event Injection".
11873 */
11874 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11875 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11876 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11877 AssertRCReturn(rc, rc);
11878 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11879 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11880 return VINF_SUCCESS;
11881 }
11882
11883 /*
11884     * Not a guest trap, so it must be a hypervisor-related debug event.
11885 * Update DR6 in case someone is interested in it.
11886 */
11887 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
11888 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
11889 CPUMSetHyperDR6(pVCpu, uDR6);
11890
11891 return rc;
11892}
11893
11894
11895/**
11896 * VM-exit exception handler for #NM (Device-not-available exception), which we
11897 * intercept to lazily load the guest FPU state.
11898 */
11899static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11900{
11901 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11902
11903 /* We require CR0 and EFER. EFER is always up-to-date. */
11904 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11905 AssertRCReturn(rc, rc);
11906
11907     /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
11908 VMMRZCallRing3Disable(pVCpu);
11909 HM_DISABLE_PREEMPT();
11910
11911 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
11912 if (pVmxTransient->fWasGuestFPUStateActive)
11913 {
11914 rc = VINF_EM_RAW_GUEST_TRAP;
11915 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
11916 }
11917 else
11918 {
11919#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11920 Assert(!pVmxTransient->fWasGuestFPUStateActive);
11921#endif
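        /* Lazily load the guest FPU state; VINF_EM_RAW_GUEST_TRAP means the #NM must be reflected to the guest instead. */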
11922 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11923 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
11924 }
11925
11926 HM_RESTORE_PREEMPT();
11927 VMMRZCallRing3Enable(pVCpu);
11928
11929 if (rc == VINF_SUCCESS)
11930 {
11931 /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
11932 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11933 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
11934 pVCpu->hm.s.fPreloadGuestFpu = true;
11935 }
11936 else
11937 {
11938 /* Forward #NM to the guest. */
11939 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
11940 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11941 AssertRCReturn(rc, rc);
11942 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11943 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
11944 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11945 }
11946
11947 return VINF_SUCCESS;
11948}
11949
11950
11951/**
11952 * VM-exit exception handler for #GP (General-protection exception).
11953 *
11954 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
11955 */
11956static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11957{
11958 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
11959 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
11960
11961 int rc = VERR_INTERNAL_ERROR_5;
11962 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11963 {
11964#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
11965 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
11966 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11967 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11968 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11969 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11970 AssertRCReturn(rc, rc);
11971 Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
11972 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
11973 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
11974 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
11975 return rc;
11976#else
11977 /* We don't intercept #GP. */
11978 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
11979 NOREF(pVmxTransient);
11980 return VERR_VMX_UNEXPECTED_EXCEPTION;
11981#endif
11982 }
11983
11984 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11985 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
11986
11987 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
11988 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11989 AssertRCReturn(rc, rc);
11990
11991 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
11992 uint32_t cbOp = 0;
11993 PVM pVM = pVCpu->CTX_SUFF(pVM);
11994 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
11995 if (RT_SUCCESS(rc))
11996 {
11997 rc = VINF_SUCCESS;
11998 Assert(cbOp == pDis->cbInstr);
11999 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
12000 switch (pDis->pCurInstr->uOpcode)
12001 {
12002 case OP_CLI:
12003 {
12004 pMixedCtx->eflags.Bits.u1IF = 0;
12005 pMixedCtx->eflags.Bits.u1RF = 0;
12006 pMixedCtx->rip += pDis->cbInstr;
12007 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12008 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12009 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
12010 break;
12011 }
12012
12013 case OP_STI:
12014 {
12015 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
12016 pMixedCtx->eflags.Bits.u1IF = 1;
12017 pMixedCtx->eflags.Bits.u1RF = 0;
12018 pMixedCtx->rip += pDis->cbInstr;
12019 if (!fOldIF)
12020 {
12021 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
12022 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
12023 }
12024 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12025 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12026 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
12027 break;
12028 }
12029
12030 case OP_HLT:
12031 {
12032 rc = VINF_EM_HALT;
12033 pMixedCtx->rip += pDis->cbInstr;
12034 pMixedCtx->eflags.Bits.u1RF = 0;
12035 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12036 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
12037 break;
12038 }
12039
12040 case OP_POPF:
12041 {
12042 Log4(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
12043 uint32_t cbParm;
12044 uint32_t uMask;
12045 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
12046 if (pDis->fPrefix & DISPREFIX_OPSIZE)
12047 {
12048 cbParm = 4;
12049 uMask = 0xffffffff;
12050 }
12051 else
12052 {
12053 cbParm = 2;
12054 uMask = 0xffff;
12055 }
12056
12057 /* Get the stack pointer & pop the contents of the stack onto Eflags. */
12058 RTGCPTR GCPtrStack = 0;
12059 X86EFLAGS Eflags;
12060 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
12061 &GCPtrStack);
12062 if (RT_SUCCESS(rc))
12063 {
12064 Assert(sizeof(Eflags.u32) >= cbParm);
12065 Eflags.u32 = 0;
12066 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm, PGMACCESSORIGIN_HM));
12067 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
12068 }
12069 if (RT_FAILURE(rc))
12070 {
12071 rc = VERR_EM_INTERPRETER;
12072 break;
12073 }
12074 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
12075 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
12076 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
12077 pMixedCtx->esp += cbParm;
12078 pMixedCtx->esp &= uMask;
12079 pMixedCtx->rip += pDis->cbInstr;
12080 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12081 | HM_CHANGED_GUEST_RSP
12082 | HM_CHANGED_GUEST_RFLAGS);
12083 /* Generate a pending-debug exception when stepping over POPF regardless of how POPF modifies EFLAGS.TF. */
12084 if (fStepping)
12085 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12086
12087 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
12088 break;
12089 }
12090
12091 case OP_PUSHF:
12092 {
12093 uint32_t cbParm;
12094 uint32_t uMask;
12095 if (pDis->fPrefix & DISPREFIX_OPSIZE)
12096 {
12097 cbParm = 4;
12098 uMask = 0xffffffff;
12099 }
12100 else
12101 {
12102 cbParm = 2;
12103 uMask = 0xffff;
12104 }
12105
12106 /* Get the stack pointer & push the contents of eflags onto the stack. */
12107 RTGCPTR GCPtrStack = 0;
12108 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
12109 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
12110 if (RT_FAILURE(rc))
12111 {
12112 rc = VERR_EM_INTERPRETER;
12113 break;
12114 }
12115 X86EFLAGS Eflags = pMixedCtx->eflags;
12116                 /* The RF & VM bits are cleared in the EFLAGS image stored on the stack; see Intel Instruction reference for PUSHF. */
12117 Eflags.Bits.u1RF = 0;
12118 Eflags.Bits.u1VM = 0;
12119
12120 rc = VBOXSTRICTRC_TODO(PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm, PGMACCESSORIGIN_HM));
12121 if (RT_UNLIKELY(rc != VINF_SUCCESS))
12122 {
12123 AssertMsgFailed(("%Rrc\n", rc)); /** @todo allow strict return codes here */
12124 rc = VERR_EM_INTERPRETER;
12125 break;
12126 }
12127 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
12128 pMixedCtx->esp -= cbParm;
12129 pMixedCtx->esp &= uMask;
12130 pMixedCtx->rip += pDis->cbInstr;
12131 pMixedCtx->eflags.Bits.u1RF = 0;
12132 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12133 | HM_CHANGED_GUEST_RSP
12134 | HM_CHANGED_GUEST_RFLAGS);
12135 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12136 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
12137 break;
12138 }
12139
12140 case OP_IRET:
12141 {
12142 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
12143 * instruction reference. */
12144 RTGCPTR GCPtrStack = 0;
12145 uint32_t uMask = 0xffff;
12146 bool fStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
12147 uint16_t aIretFrame[3];
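                /* Layout of the 16-bit IRET frame popped below: aIretFrame[0]=IP, [1]=CS, [2]=FLAGS. */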
12148 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
12149 {
12150 rc = VERR_EM_INTERPRETER;
12151 break;
12152 }
12153 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
12154 &GCPtrStack);
12155 if (RT_SUCCESS(rc))
12156 {
12157 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame),
12158 PGMACCESSORIGIN_HM));
12159 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
12160 }
12161 if (RT_FAILURE(rc))
12162 {
12163 rc = VERR_EM_INTERPRETER;
12164 break;
12165 }
12166 pMixedCtx->eip = 0;
12167 pMixedCtx->ip = aIretFrame[0];
12168 pMixedCtx->cs.Sel = aIretFrame[1];
12169 pMixedCtx->cs.ValidSel = aIretFrame[1];
12170 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
12171 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
12172 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
12173 pMixedCtx->sp += sizeof(aIretFrame);
12174 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12175 | HM_CHANGED_GUEST_SEGMENT_REGS
12176 | HM_CHANGED_GUEST_RSP
12177 | HM_CHANGED_GUEST_RFLAGS);
12178 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
12179 if (fStepping)
12180 hmR0VmxSetPendingDebugXcpt(pVCpu, pMixedCtx);
12181                 Log4(("IRET %#RGv to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
12182 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
12183 break;
12184 }
12185
12186 case OP_INT:
12187 {
12188 uint16_t uVector = pDis->Param1.uValue & 0xff;
12189 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
12190 /* INT clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
12191 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12192 break;
12193 }
12194
12195 case OP_INTO:
12196 {
12197 if (pMixedCtx->eflags.Bits.u1OF)
12198 {
12199 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
12200 /* INTO clears EFLAGS.TF, we mustn't set any pending debug exceptions here. */
12201 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
12202 }
12203 else
12204 {
12205 pMixedCtx->eflags.Bits.u1RF = 0;
12206 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
12207 }
12208 break;
12209 }
12210
12211 default:
12212 {
12213 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
12214 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
12215 EMCODETYPE_SUPERVISOR);
12216 rc = VBOXSTRICTRC_VAL(rc2);
12217 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
12218 /** @todo We have to set pending-debug exceptions here when the guest is
12219 * single-stepping depending on the instruction that was interpreted. */
12220 Log4(("#GP rc=%Rrc\n", rc));
12221 break;
12222 }
12223 }
12224 }
12225 else
12226 rc = VERR_EM_INTERPRETER;
12227
12228 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
12229 ("#GP Unexpected rc=%Rrc\n", rc));
12230 return rc;
12231}
12232
12233
12234#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
12235/**
12236 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
12237 * the exception reported in the VMX transient structure back into the VM.
12238 *
12239 * @remarks Requires uExitIntInfo in the VMX transient structure to be
12240 * up-to-date.
12241 */
12242static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12243{
12244 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12245
12246 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
12247 hmR0VmxCheckExitDueToEventDelivery(). */
12248 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12249 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12250 AssertRCReturn(rc, rc);
12251 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
12252
12253#ifdef DEBUG_ramshankar
12254 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12255 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
12256     Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pMixedCtx->cs.Sel, pMixedCtx->rip));
12257#endif
12258
12259 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12260 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12261 return VINF_SUCCESS;
12262}
12263#endif
12264
12265
12266/**
12267 * VM-exit exception handler for #PF (Page-fault exception).
12268 */
12269static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12270{
12271 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12272 PVM pVM = pVCpu->CTX_SUFF(pVM);
12273 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12274 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12275 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12276 AssertRCReturn(rc, rc);
12277
12278#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
12279 if (pVM->hm.s.fNestedPaging)
12280 {
12281 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
12282 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
12283 {
12284 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12285 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12286 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
12287 }
12288 else
12289 {
12290 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12291 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12292 Log4(("Pending #DF due to vectoring #PF. NP\n"));
12293 }
12294 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12295 return rc;
12296 }
12297#else
12298 Assert(!pVM->hm.s.fNestedPaging);
12299 NOREF(pVM);
12300#endif
12301
12302     /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
12303 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
12304 if (pVmxTransient->fVectoringPF)
12305 {
12306 Assert(pVCpu->hm.s.Event.fPending);
12307 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12308 }
12309
12310 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12311 AssertRCReturn(rc, rc);
12312
12313 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
12314 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
12315
12316 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
12317 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
12318 (RTGCPTR)pVmxTransient->uExitQualification);
12319
12320 Log4(("#PF: rc=%Rrc\n", rc));
12321 if (rc == VINF_SUCCESS)
12322 {
12323         /* Successfully synced shadow page tables or emulated an MMIO instruction. */
12324 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
12325 * memory? We don't update the whole state here... */
12326 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12327 | HM_CHANGED_GUEST_RSP
12328 | HM_CHANGED_GUEST_RFLAGS
12329 | HM_CHANGED_VMX_GUEST_APIC_STATE);
12330 TRPMResetTrap(pVCpu);
12331 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
12332 return rc;
12333 }
12334
12335 if (rc == VINF_EM_RAW_GUEST_TRAP)
12336 {
12337 if (!pVmxTransient->fVectoringDoublePF)
12338 {
12339 /* It's a guest page fault and needs to be reflected to the guest. */
12340 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
12341 TRPMResetTrap(pVCpu);
12342 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
12343 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
12344 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12345 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
12346 }
12347 else
12348 {
12349 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
12350 TRPMResetTrap(pVCpu);
12351 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
12352 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
12353 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
12354 }
12355
12356 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
12357 return VINF_SUCCESS;
12358 }
12359
12360 TRPMResetTrap(pVCpu);
12361 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
12362 return rc;
12363}
12364
12365/** @} */
12366