VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp @ 59612

Last change on this file since 59612 was 59612, checked in by vboxsync on 2016-02-09: "Build fix."
  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 571.6 KB
1/* $Id: HMVMXR0.cpp 59612 2016-02-09 09:18:31Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#include <iprt/x86.h>
24#include <iprt/asm-amd64-x86.h>
25#include <iprt/thread.h>
26
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/dbgf.h>
29#include <VBox/vmm/iem.h>
30#include <VBox/vmm/iom.h>
31#include <VBox/vmm/selm.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/gim.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include "HMInternal.h"
38#include <VBox/vmm/vm.h>
39#include "HMVMXR0.h"
40#include "dtrace/VBoxVMM.h"
41
42#ifdef DEBUG_ramshankar
43# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
44# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
45# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
46# define HMVMX_ALWAYS_CHECK_GUEST_STATE
47# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
48# define HMVMX_ALWAYS_TRAP_PF
49# define HMVMX_ALWAYS_SWAP_FPU_STATE
50# define HMVMX_ALWAYS_FLUSH_TLB
51# define HMVMX_ALWAYS_SWAP_EFER
52#endif
53
54
55/*********************************************************************************************************************************
56* Defined Constants And Macros *
57*********************************************************************************************************************************/
58/** Use the function table. */
59#define HMVMX_USE_FUNCTION_TABLE
60
61/** Determine which tagged-TLB flush handler to use. */
62#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
63#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
64#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
65#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
66
67/** @name Updated-guest-state flags.
68 * @{ */
69#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
70#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
71#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
72#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
73#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
74#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
75#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
76#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
77#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
78#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
79#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
80#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
81#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12)
82#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13)
83#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14)
84#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15)
85#define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16)
86#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17)
87#define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18)
88#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
89#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
90 | HMVMX_UPDATED_GUEST_RSP \
91 | HMVMX_UPDATED_GUEST_RFLAGS \
92 | HMVMX_UPDATED_GUEST_CR0 \
93 | HMVMX_UPDATED_GUEST_CR3 \
94 | HMVMX_UPDATED_GUEST_CR4 \
95 | HMVMX_UPDATED_GUEST_GDTR \
96 | HMVMX_UPDATED_GUEST_IDTR \
97 | HMVMX_UPDATED_GUEST_LDTR \
98 | HMVMX_UPDATED_GUEST_TR \
99 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
100 | HMVMX_UPDATED_GUEST_DEBUG \
101 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
102 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
103 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
104 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
105 | HMVMX_UPDATED_GUEST_LAZY_MSRS \
106 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
107 | HMVMX_UPDATED_GUEST_INTR_STATE \
108 | HMVMX_UPDATED_GUEST_APIC_STATE)
109/** @} */
110
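/*
 * Standalone sketch (not part of HMVMXR0.cpp): the update-flag pattern used by the
 * HMVMX_UPDATED_GUEST_* bits above -- each piece of guest state gets one bit, and
 * "everything updated" is simply the OR of all bits. BIT() stands in for IPRT's
 * RT_BIT(); the three flags below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)              (UINT32_C(1) << (n))
#define UPD_GUEST_RIP       BIT(0)
#define UPD_GUEST_RSP       BIT(1)
#define UPD_GUEST_RFLAGS    BIT(2)
#define UPD_GUEST_ALL       (UPD_GUEST_RIP | UPD_GUEST_RSP | UPD_GUEST_RFLAGS)

int main(void)
{
    uint32_t fUpdated = 0;
    fUpdated |= UPD_GUEST_RIP;                      /* mark RIP as read back from the VMCS */
    fUpdated |= UPD_GUEST_RSP | UPD_GUEST_RFLAGS;   /* ... then RSP and RFLAGS */
    printf("all updated: %s\n", (fUpdated & UPD_GUEST_ALL) == UPD_GUEST_ALL ? "yes" : "no");
    return 0;
}
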
111/** @name
112 * Flags to skip redundant reads of some common VMCS fields that are not part of
113 * the guest-CPU state but are in the transient structure.
114 * @{ */
115#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
116#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
117#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
118#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
119#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
120#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
121#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
122/** @} */
123
124/** @name
125 * States of the VMCS.
126 *
127 * This does not reflect all possible VMCS states but currently only those
128 * needed for maintaining the VMCS consistently even when thread-context hooks
129 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
130 * @{ */
131#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
132#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
133#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
134/** @} */
135
136/**
137 * Exception bitmap mask for real-mode guests (real-on-v86).
138 *
139 * We need to intercept all exceptions manually except:
140 * - \#NM, \#MF handled in hmR0VmxLoadSharedCR0().
141 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
142 * due to bugs in Intel CPUs.
143 * - \#PF need not be intercepted even in real-mode if we have Nested Paging
144 * support.
145 */
146#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
147 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
148 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
149 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
150 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
151 /* RT_BIT(X86_XCPT_MF) always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
152 | RT_BIT(X86_XCPT_XF))
153
154/**
155 * Exception bitmap mask for all contributory exceptions.
156 *
157 * Page fault is deliberately excluded here as it's conditional as to whether
158 * it's contributory or benign. Page faults are handled separately.
159 */
160#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
161 | RT_BIT(X86_XCPT_DE))
162
163/** Maximum VM-instruction error number. */
164#define HMVMX_INSTR_ERROR_MAX 28
165
166/** Profiling macro. */
167#ifdef HM_PROFILE_EXIT_DISPATCH
168# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
169# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
170#else
171# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
172# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
173#endif
174
175/** Assert that preemption is disabled or covered by thread-context hooks. */
176#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
177 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
178
179/** Assert that we haven't migrated CPUs when thread-context hooks are not
180 * used. */
181#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
182 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
183 ("Illegal migration! Entered on CPU %u Current %u\n", \
184 pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
185
186/** Helper macro for VM-exit handlers called unexpectedly. */
187#define HMVMX_RETURN_UNEXPECTED_EXIT() \
188 do { \
189 pVCpu->hm.s.u32HMError = pVmxTransient->uExitReason; \
190 return VERR_VMX_UNEXPECTED_EXIT; \
191 } while (0)
192
193
194/*********************************************************************************************************************************
195* Structures and Typedefs *
196*********************************************************************************************************************************/
197/**
198 * VMX transient state.
199 *
200 * A state structure for holding miscellaneous information across
201 * VMX non-root operation and restored after the transition.
202 */
203typedef struct VMXTRANSIENT
204{
205 /** The host's rflags/eflags. */
206 RTCCUINTREG fEFlags;
207#if HC_ARCH_BITS == 32
208 uint32_t u32Alignment0;
209#endif
210 /** The guest's TPR value used for TPR shadowing. */
211 uint8_t u8GuestTpr;
212 /** Alignment. */
213 uint8_t abAlignment0[7];
214
215 /** The basic VM-exit reason. */
216 uint16_t uExitReason;
217 /** Alignment. */
218 uint16_t u16Alignment0;
219 /** The VM-exit interruption error code. */
220 uint32_t uExitIntErrorCode;
221 /** The VM-exit exit code qualification. */
222 uint64_t uExitQualification;
223
224 /** The VM-exit interruption-information field. */
225 uint32_t uExitIntInfo;
226 /** The VM-exit instruction-length field. */
227 uint32_t cbInstr;
228 /** The VM-exit instruction-information field. */
229 union
230 {
231 /** Plain unsigned int representation. */
232 uint32_t u;
233 /** INS and OUTS information. */
234 struct
235 {
236 uint32_t u6Reserved0 : 7;
237 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
238 uint32_t u3AddrSize : 3;
239 uint32_t u5Reserved1 : 5;
240 /** The segment register (X86_SREG_XXX). */
241 uint32_t iSegReg : 3;
242 uint32_t uReserved2 : 14;
243 } StrIo;
244 } ExitInstrInfo;
245 /** Whether the VM-entry failed or not. */
246 bool fVMEntryFailed;
247 /** Alignment. */
248 uint8_t abAlignment1[3];
249
250 /** The VM-entry interruption-information field. */
251 uint32_t uEntryIntInfo;
252 /** The VM-entry exception error code field. */
253 uint32_t uEntryXcptErrorCode;
254 /** The VM-entry instruction length field. */
255 uint32_t cbEntryInstr;
256
257 /** IDT-vectoring information field. */
258 uint32_t uIdtVectoringInfo;
259 /** IDT-vectoring error code. */
260 uint32_t uIdtVectoringErrorCode;
261
262 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
263 uint32_t fVmcsFieldsRead;
264
265 /** Whether the guest FPU was active at the time of VM-exit. */
266 bool fWasGuestFPUStateActive;
267 /** Whether the guest debug state was active at the time of VM-exit. */
268 bool fWasGuestDebugStateActive;
269 /** Whether the hyper debug state was active at the time of VM-exit. */
270 bool fWasHyperDebugStateActive;
271 /** Whether TSC-offsetting should be setup before VM-entry. */
272 bool fUpdateTscOffsettingAndPreemptTimer;
273 /** Whether the VM-exit was caused by a page-fault during delivery of a
274 * contributory exception or a page-fault. */
275 bool fVectoringDoublePF;
276 /** Whether the VM-exit was caused by a page-fault during delivery of an
277 * external interrupt or NMI. */
278 bool fVectoringPF;
279} VMXTRANSIENT;
280AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
281AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
282AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
283AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestFPUStateActive, sizeof(uint64_t));
284AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
285/** Pointer to VMX transient state. */
286typedef VMXTRANSIENT *PVMXTRANSIENT;
287
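/*
 * Standalone sketch (not part of HMVMXR0.cpp): how the StrIo bit-fields in
 * VMXTRANSIENT::ExitInstrInfo carve up the 32-bit VM-exit instruction-information
 * word for INS/OUTS. The layout mirrors the union above; the sample values are
 * made up for illustration, and C bit-field ordering is implementation-defined.
 */
#include <stdint.h>
#include <stdio.h>

union ExitInstrInfoSketch
{
    uint32_t u;
    struct
    {
        uint32_t uReserved0 : 7;
        uint32_t u3AddrSize : 3;   /* 0=16-bit, 1=32-bit, 2=64-bit */
        uint32_t uReserved1 : 5;
        uint32_t iSegReg    : 3;   /* segment register ordinal (X86_SREG_XXX style) */
        uint32_t uReserved2 : 14;
    } StrIo;
};

int main(void)
{
    union ExitInstrInfoSketch Info;
    Info.u = 0;
    Info.StrIo.u3AddrSize = 2;     /* 64-bit address size */
    Info.StrIo.iSegReg    = 3;     /* DS, assuming the usual ES..GS ordering */
    printf("raw=%#x addrsize=%u seg=%u\n", (unsigned)Info.u,
           (unsigned)Info.StrIo.u3AddrSize, (unsigned)Info.StrIo.iSegReg);
    return 0;
}
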
288
289/**
290 * MSR-bitmap read permissions.
291 */
292typedef enum VMXMSREXITREAD
293{
294 /** Reading this MSR causes a VM-exit. */
295 VMXMSREXIT_INTERCEPT_READ = 0xb,
296 /** Reading this MSR does not cause a VM-exit. */
297 VMXMSREXIT_PASSTHRU_READ
298} VMXMSREXITREAD;
299/** Pointer to MSR-bitmap read permissions. */
300typedef VMXMSREXITREAD* PVMXMSREXITREAD;
301
302/**
303 * MSR-bitmap write permissions.
304 */
305typedef enum VMXMSREXITWRITE
306{
307 /** Writing to this MSR causes a VM-exit. */
308 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
309 /** Writing to this MSR does not cause a VM-exit. */
310 VMXMSREXIT_PASSTHRU_WRITE
311} VMXMSREXITWRITE;
312/** Pointer to MSR-bitmap write permissions. */
313typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
314
315
316/**
317 * VMX VM-exit handler.
318 *
319 * @returns Strict VBox status code (i.e. informational status codes too).
320 * @param pVCpu The cross context virtual CPU structure.
321 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
322 * out-of-sync. Make sure to update the required
323 * fields before using them.
324 * @param pVmxTransient Pointer to the VMX-transient structure.
325 */
326#ifndef HMVMX_USE_FUNCTION_TABLE
327typedef DECLINLINE(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
328#else
329typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
330/** Pointer to VM-exit handler. */
331typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
332#endif
333
334/**
335 * VMX VM-exit handler, non-strict status code.
336 *
337 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
338 *
339 * @returns VBox status code, no informational status code returned.
340 * @param pVCpu The cross context virtual CPU structure.
341 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
342 * out-of-sync. Make sure to update the required
343 * fields before using them.
344 * @param pVmxTransient Pointer to the VMX-transient structure.
345 *
346 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
347 * use of that status code will be replaced with VINF_EM_SOMETHING
348 * later when switching over to IEM.
349 */
350#ifndef HMVMX_USE_FUNCTION_TABLE
351typedef DECLINLINE(int) FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
352#else
353typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
354#endif
355
356
357/*********************************************************************************************************************************
358* Internal Functions *
359*********************************************************************************************************************************/
360static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush);
361static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr);
362static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
363static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
364 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
365 bool fStepping, uint32_t *puIntState);
366#if HC_ARCH_BITS == 32
367static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
368#endif
369#ifndef HMVMX_USE_FUNCTION_TABLE
370DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
371# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
372# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
373#else
374# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
375# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
376#endif
377
378
379/** @name VM-exit handlers.
380 * @{
381 */
382static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
383static FNVMXEXITHANDLER hmR0VmxExitExtInt;
384static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
385static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal;
386static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi;
387static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi;
388static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi;
389static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
390static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
391static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
392static FNVMXEXITHANDLER hmR0VmxExitCpuid;
393static FNVMXEXITHANDLER hmR0VmxExitGetsec;
394static FNVMXEXITHANDLER hmR0VmxExitHlt;
395static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
396static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
397static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
398static FNVMXEXITHANDLER hmR0VmxExitVmcall;
399static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
400static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
401static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
402static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
403static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
404static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
405static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
406static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
407static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
408static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad;
409static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUndefined;
410static FNVMXEXITHANDLER hmR0VmxExitMwait;
411static FNVMXEXITHANDLER hmR0VmxExitMtf;
412static FNVMXEXITHANDLER hmR0VmxExitMonitor;
413static FNVMXEXITHANDLER hmR0VmxExitPause;
414static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck;
415static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
416static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
417static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
418static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
419static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
420static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
421static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
422static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
423static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
424static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
425static FNVMXEXITHANDLER hmR0VmxExitRdrand;
426static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
427/** @} */
428
429static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
430static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
431static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
432static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
433static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
434static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
435static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
436static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
437static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
438
439
440/*********************************************************************************************************************************
441* Global Variables *
442*********************************************************************************************************************************/
443#ifdef HMVMX_USE_FUNCTION_TABLE
444
445/**
446 * VMX_EXIT dispatch table.
447 */
448static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
449{
450 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
451 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
452 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
453 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
454 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
455 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
456 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
457 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
458 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
459 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
460 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
461 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
462 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
463 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
464 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
465 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
466 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
467 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
468 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
469 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
470 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
471 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
472 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
473 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
474 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
475 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
476 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
477 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
478 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
479 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
480 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
481 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
482 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
483 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
484 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
485 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
486 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
487 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
488 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
489 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
490 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
491 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
492 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
493 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
494 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
495 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
496 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
497 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
498 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
499 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
500 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
501 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
502 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
503 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
504 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
505 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
506 /* 56 VMX_EXIT_APIC_WRITE */ hmR0VmxExitErrUndefined,
507 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
508 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
509 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
510 /* 60 VMX_EXIT_RESERVED_60 */ hmR0VmxExitErrUndefined,
511 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
512 /* 62 VMX_EXIT_RESERVED_62 */ hmR0VmxExitErrUndefined,
513 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
514 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
515};
516#endif /* HMVMX_USE_FUNCTION_TABLE */
517
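/*
 * Standalone sketch (not part of HMVMXR0.cpp) of the table-driven dispatch used
 * with g_apfnVMExitHandlers above: the basic exit reason indexes an array of
 * handler pointers, and anything out of range is routed to an "undefined"
 * handler. All names below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

typedef int FNEXITHANDLER(uint16_t uExitReason);

static int handlerCpuid(uint16_t uExitReason) { printf("CPUID exit (%u)\n", uExitReason); return 0; }
static int handlerHlt(uint16_t uExitReason)   { printf("HLT exit (%u)\n", uExitReason);   return 0; }
static int handlerUndef(uint16_t uExitReason) { printf("undefined exit (%u)\n", uExitReason); return -1; }

static FNEXITHANDLER * const g_apfnHandlers[] = { handlerCpuid, handlerHlt };

static int dispatchExit(uint16_t uExitReason)
{
    if (uExitReason < sizeof(g_apfnHandlers) / sizeof(g_apfnHandlers[0]))
        return g_apfnHandlers[uExitReason](uExitReason);
    return handlerUndef(uExitReason);
}

int main(void)
{
    dispatchExit(0);    /* in range */
    dispatchExit(99);   /* out of range */
    return 0;
}
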
518#ifdef VBOX_STRICT
519static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
520{
521 /* 0 */ "(Not Used)",
522 /* 1 */ "VMCALL executed in VMX root operation.",
523 /* 2 */ "VMCLEAR with invalid physical address.",
524 /* 3 */ "VMCLEAR with VMXON pointer.",
525 /* 4 */ "VMLAUNCH with non-clear VMCS.",
526 /* 5 */ "VMRESUME with non-launched VMCS.",
527 /* 6 */ "VMRESUME after VMXOFF.",
528 /* 7 */ "VM-entry with invalid control fields.",
529 /* 8 */ "VM-entry with invalid host state fields.",
530 /* 9 */ "VMPTRLD with invalid physical address.",
531 /* 10 */ "VMPTRLD with VMXON pointer.",
532 /* 11 */ "VMPTRLD with incorrect revision identifier.",
533 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
534 /* 13 */ "VMWRITE to read-only VMCS component.",
535 /* 14 */ "(Not Used)",
536 /* 15 */ "VMXON executed in VMX root operation.",
537 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
538 /* 17 */ "VM-entry with non-launched executing VMCS.",
539 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
540 /* 19 */ "VMCALL with non-clear VMCS.",
541 /* 20 */ "VMCALL with invalid VM-exit control fields.",
542 /* 21 */ "(Not Used)",
543 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
544 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
545 /* 24 */ "VMCALL with invalid SMM-monitor features.",
546 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
547 /* 26 */ "VM-entry with events blocked by MOV SS.",
548 /* 27 */ "(Not Used)",
549 /* 28 */ "Invalid operand to INVEPT/INVVPID."
550};
551#endif /* VBOX_STRICT */
552
553
554
555/**
556 * Updates the VM's last error record.
557 *
558 * If there was a VMX instruction error, reads the error data from the VMCS and
559 * updates VCPU's last error record as well.
560 *
561 * @param pVM The cross context VM structure.
562 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
563 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
564 * VERR_VMX_INVALID_VMCS_FIELD.
565 * @param rc The error code.
566 */
567static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
568{
569 AssertPtr(pVM);
570 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
571 || rc == VERR_VMX_UNABLE_TO_START_VM)
572 {
573 AssertPtrReturnVoid(pVCpu);
574 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
575 }
576 pVM->hm.s.lLastError = rc;
577}
578
579
580/**
581 * Reads the VM-entry interruption-information field from the VMCS into the VMX
582 * transient structure.
583 *
584 * @returns VBox status code.
585 * @param pVmxTransient Pointer to the VMX transient structure.
586 *
587 * @remarks No-long-jump zone!!!
588 */
589DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
590{
591 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
592 AssertRCReturn(rc, rc);
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Reads the VM-entry exception error code field from the VMCS into
599 * the VMX transient structure.
600 *
601 * @returns VBox status code.
602 * @param pVmxTransient Pointer to the VMX transient structure.
603 *
604 * @remarks No-long-jump zone!!!
605 */
606DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
607{
608 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
609 AssertRCReturn(rc, rc);
610 return VINF_SUCCESS;
611}
612
613
614/**
615 * Reads the VM-entry instruction length field from the VMCS into
616 * the VMX transient structure.
617 *
618 * @returns VBox status code.
619 * @param pVmxTransient Pointer to the VMX transient structure.
620 *
621 * @remarks No-long-jump zone!!!
622 */
623DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
624{
625 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
626 AssertRCReturn(rc, rc);
627 return VINF_SUCCESS;
628}
629
630
631/**
632 * Reads the VM-exit interruption-information field from the VMCS into the VMX
633 * transient structure.
634 *
635 * @returns VBox status code.
636 * @param pVmxTransient Pointer to the VMX transient structure.
637 */
638DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
639{
640 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
641 {
642 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
643 AssertRCReturn(rc, rc);
644 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
645 }
646 return VINF_SUCCESS;
647}
648
649
650/**
651 * Reads the VM-exit interruption error code from the VMCS into the VMX
652 * transient structure.
653 *
654 * @returns VBox status code.
655 * @param pVmxTransient Pointer to the VMX transient structure.
656 */
657DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
658{
659 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
660 {
661 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
662 AssertRCReturn(rc, rc);
663 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
664 }
665 return VINF_SUCCESS;
666}
667
668
669/**
670 * Reads the VM-exit instruction length field from the VMCS into the VMX
671 * transient structure.
672 *
673 * @returns VBox status code.
674 * @param pVmxTransient Pointer to the VMX transient structure.
675 */
676DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
677{
678 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
679 {
680 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
681 AssertRCReturn(rc, rc);
682 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
683 }
684 return VINF_SUCCESS;
685}
686
687
688/**
689 * Reads the VM-exit instruction-information field from the VMCS into
690 * the VMX transient structure.
691 *
692 * @returns VBox status code.
693 * @param pVmxTransient Pointer to the VMX transient structure.
694 */
695DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
696{
697 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
698 {
699 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
700 AssertRCReturn(rc, rc);
701 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
702 }
703 return VINF_SUCCESS;
704}
705
706
707/**
708 * Reads the exit code qualification from the VMCS into the VMX transient
709 * structure.
710 *
711 * @returns VBox status code.
712 * @param pVCpu The cross context virtual CPU structure of the
713 * calling EMT. (Required for the VMCS cache case.)
714 * @param pVmxTransient Pointer to the VMX transient structure.
715 */
716DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
717{
718 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
719 {
720 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
721 AssertRCReturn(rc, rc);
722 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
723 }
724 return VINF_SUCCESS;
725}
726
727
728/**
729 * Reads the IDT-vectoring information field from the VMCS into the VMX
730 * transient structure.
731 *
732 * @returns VBox status code.
733 * @param pVmxTransient Pointer to the VMX transient structure.
734 *
735 * @remarks No-long-jump zone!!!
736 */
737DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
738{
739 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
740 {
741 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
742 AssertRCReturn(rc, rc);
743 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
744 }
745 return VINF_SUCCESS;
746}
747
748
749/**
750 * Reads the IDT-vectoring error code from the VMCS into the VMX
751 * transient structure.
752 *
753 * @returns VBox status code.
754 * @param pVmxTransient Pointer to the VMX transient structure.
755 */
756DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
757{
758 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
759 {
760 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
761 AssertRCReturn(rc, rc);
762 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
763 }
764 return VINF_SUCCESS;
765}
766
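/*
 * Standalone sketch (not part of HMVMXR0.cpp) of the "read once per VM-exit"
 * caching pattern the hmR0VmxRead*Vmcs helpers above share: a per-exit bit mask
 * records which fields have already been fetched, so repeated calls skip the
 * comparatively expensive hardware read. readFieldFromHw() stands in for
 * VMXReadVmcs32().
 */
#include <stdint.h>
#include <stdio.h>

#define READ_EXIT_QUAL  (1U << 0)

static unsigned g_cHwReads;

static uint32_t readFieldFromHw(void)
{
    ++g_cHwReads;                            /* pretend this is a VMREAD */
    return 0x1234;
}

static uint32_t getExitQual(uint32_t *pfFieldsRead, uint32_t *puCached)
{
    if (!(*pfFieldsRead & READ_EXIT_QUAL))
    {
        *puCached = readFieldFromHw();
        *pfFieldsRead |= READ_EXIT_QUAL;
    }
    return *puCached;
}

int main(void)
{
    uint32_t fFieldsRead = 0, uQual = 0;
    getExitQual(&fFieldsRead, &uQual);
    getExitQual(&fFieldsRead, &uQual);       /* second call hits the cache */
    printf("hardware reads: %u\n", g_cHwReads);   /* prints 1 */
    return 0;
}
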
767
768/**
769 * Enters VMX root mode operation on the current CPU.
770 *
771 * @returns VBox status code.
772 * @param pVM The cross context VM structure. Can be
773 * NULL, after a resume.
774 * @param HCPhysCpuPage Physical address of the VMXON region.
775 * @param pvCpuPage Pointer to the VMXON region.
776 */
777static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
778{
779 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
780 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
781 Assert(pvCpuPage);
782 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
783
784 if (pVM)
785 {
786 /* Write the VMCS revision dword to the VMXON region. */
787 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
788 }
789
790 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
791 RTCCUINTREG fEFlags = ASMIntDisableFlags();
792
793 /* Enable the VMX bit in CR4 if necessary. */
794 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
795
796 /* Enter VMX root mode. */
797 int rc = VMXEnable(HCPhysCpuPage);
798 if (RT_FAILURE(rc))
799 {
800 if (!(uOldCr4 & X86_CR4_VMXE))
801 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
802
803 if (pVM)
804 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
805 }
806
807 /* Restore interrupts. */
808 ASMSetFlags(fEFlags);
809 return rc;
810}
811
812
813/**
814 * Exits VMX root mode operation on the current CPU.
815 *
816 * @returns VBox status code.
817 */
818static int hmR0VmxLeaveRootMode(void)
819{
820 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
821
822 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
823 RTCCUINTREG fEFlags = ASMIntDisableFlags();
824
825 /* If we're for some reason not in VMX root mode, then don't leave it. */
826 RTCCUINTREG uHostCR4 = ASMGetCR4();
827
828 int rc;
829 if (uHostCR4 & X86_CR4_VMXE)
830 {
831 /* Exit VMX root mode and clear the VMX bit in CR4. */
832 VMXDisable();
833 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
834 rc = VINF_SUCCESS;
835 }
836 else
837 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
838
839 /* Restore interrupts. */
840 ASMSetFlags(fEFlags);
841 return rc;
842}
843
844
845/**
846 * Allocates and maps one physically contiguous page. The allocated page is
847 * zero'd out. (Used by various VT-x structures).
848 *
849 * @returns IPRT status code.
850 * @param pMemObj Pointer to the ring-0 memory object.
851 * @param ppVirt Where to store the virtual address of the
852 * allocation.
853 * @param pHCPhys Where to store the physical address of the
854 * allocation.
855 */
856DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
857{
858 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
859 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
860 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
861
862 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
863 if (RT_FAILURE(rc))
864 return rc;
865 *ppVirt = RTR0MemObjAddress(*pMemObj);
866 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
867 ASMMemZero32(*ppVirt, PAGE_SIZE);
868 return VINF_SUCCESS;
869}
870
871
872/**
873 * Frees and unmaps an allocated physical page.
874 *
875 * @param pMemObj Pointer to the ring-0 memory object.
876 * @param ppVirt Where to re-initialize the virtual address of
877 * the allocation as 0.
878 * @param pHCPhys Where to re-initialize the physical address of the
879 * allocation as 0.
880 */
881DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
882{
883 AssertPtr(pMemObj);
884 AssertPtr(ppVirt);
885 AssertPtr(pHCPhys);
886 if (*pMemObj != NIL_RTR0MEMOBJ)
887 {
888 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
889 AssertRC(rc);
890 *pMemObj = NIL_RTR0MEMOBJ;
891 *ppVirt = 0;
892 *pHCPhys = 0;
893 }
894}
895
896
897/**
898 * Worker function to free VT-x related structures.
899 *
900 * @returns IPRT status code.
901 * @param pVM The cross context VM structure.
902 */
903static void hmR0VmxStructsFree(PVM pVM)
904{
905 for (VMCPUID i = 0; i < pVM->cCpus; i++)
906 {
907 PVMCPU pVCpu = &pVM->aCpus[i];
908 AssertPtr(pVCpu);
909
910 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
911 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
912
913 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
914 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
915
916 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
917 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
918 }
919
920 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
921#ifdef VBOX_WITH_CRASHDUMP_MAGIC
922 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
923#endif
924}
925
926
927/**
928 * Worker function to allocate VT-x related VM structures.
929 *
930 * @returns IPRT status code.
931 * @param pVM The cross context VM structure.
932 */
933static int hmR0VmxStructsAlloc(PVM pVM)
934{
935 /*
936 * Initialize members up-front so we can cleanup properly on allocation failure.
937 */
938#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
939 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
940 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
941 pVM->hm.s.vmx.HCPhys##a_Name = 0;
942
943#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
944 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
945 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
946 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
947
948#ifdef VBOX_WITH_CRASHDUMP_MAGIC
949 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
950#endif
951 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
952
953 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
954 for (VMCPUID i = 0; i < pVM->cCpus; i++)
955 {
956 PVMCPU pVCpu = &pVM->aCpus[i];
957 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
958 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
959 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
960 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
961 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
962 }
963#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
964#undef VMXLOCAL_INIT_VM_MEMOBJ
965
966 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
967 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
968 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
969 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
970
971 /*
972 * Allocate all the VT-x structures.
973 */
974 int rc = VINF_SUCCESS;
975#ifdef VBOX_WITH_CRASHDUMP_MAGIC
976 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
977 if (RT_FAILURE(rc))
978 goto cleanup;
979 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
980 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
981#endif
982
983 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
984 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
985 {
986 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
987 &pVM->hm.s.vmx.HCPhysApicAccess);
988 if (RT_FAILURE(rc))
989 goto cleanup;
990 }
991
992 /*
993 * Initialize per-VCPU VT-x structures.
994 */
995 for (VMCPUID i = 0; i < pVM->cCpus; i++)
996 {
997 PVMCPU pVCpu = &pVM->aCpus[i];
998 AssertPtr(pVCpu);
999
1000 /* Allocate the VM control structure (VMCS). */
1001 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
1002 if (RT_FAILURE(rc))
1003 goto cleanup;
1004
1005 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
1006 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
1007 {
1008 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
1009 &pVCpu->hm.s.vmx.HCPhysVirtApic);
1010 if (RT_FAILURE(rc))
1011 goto cleanup;
1012 }
1013
1014 /*
1015 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
1016 * transparent accesses of specific MSRs.
1017 *
1018 * If the condition for enabling MSR bitmaps changes here, don't forget to
1019 * update HMAreMsrBitmapsAvailable().
1020 */
1021 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1022 {
1023 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1024 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1025 if (RT_FAILURE(rc))
1026 goto cleanup;
1027 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1028 }
1029
1030 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1031 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1032 if (RT_FAILURE(rc))
1033 goto cleanup;
1034
1035 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1036 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1037 if (RT_FAILURE(rc))
1038 goto cleanup;
1039 }
1040
1041 return VINF_SUCCESS;
1042
1043cleanup:
1044 hmR0VmxStructsFree(pVM);
1045 return rc;
1046}
1047
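/*
 * Standalone sketch (not part of HMVMXR0.cpp) of the allocate-with-single-cleanup
 * pattern used by hmR0VmxStructsAlloc(): every member is put into a known "empty"
 * state up-front, so a failure at any point can jump to one cleanup label that
 * frees whatever happens to have been allocated so far. Names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { void *pvA; void *pvB; } RESOURCES;

static void resourcesFree(RESOURCES *pRes)
{
    free(pRes->pvA); pRes->pvA = NULL;       /* free(NULL) is a no-op, so always safe */
    free(pRes->pvB); pRes->pvB = NULL;
}

static int resourcesAlloc(RESOURCES *pRes)
{
    pRes->pvA = NULL;                        /* init up-front so cleanup is always valid */
    pRes->pvB = NULL;

    pRes->pvA = malloc(4096);
    if (!pRes->pvA)
        goto cleanup;
    pRes->pvB = malloc(4096);
    if (!pRes->pvB)
        goto cleanup;
    return 0;

cleanup:
    resourcesFree(pRes);
    return -1;
}

int main(void)
{
    RESOURCES Res;
    printf("alloc %s\n", resourcesAlloc(&Res) == 0 ? "succeeded" : "failed");
    resourcesFree(&Res);
    return 0;
}
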
1048
1049/**
1050 * Does global VT-x initialization (called during module initialization).
1051 *
1052 * @returns VBox status code.
1053 */
1054VMMR0DECL(int) VMXR0GlobalInit(void)
1055{
1056#ifdef HMVMX_USE_FUNCTION_TABLE
1057 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1058# ifdef VBOX_STRICT
1059 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1060 Assert(g_apfnVMExitHandlers[i]);
1061# endif
1062#endif
1063 return VINF_SUCCESS;
1064}
1065
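/*
 * Standalone sketch (not part of HMVMXR0.cpp) of the compile-time size check that
 * AssertCompile() performs in VMXR0GlobalInit() above, expressed with C11
 * _Static_assert: the build breaks if the dispatch table and the maximum exit
 * number drift apart. MY_EXIT_MAX and g_apfnSketch are illustrative names only.
 */
#include <stddef.h>

#define MY_EXIT_MAX 1
typedef int FNSKETCHHANDLER(void);
static FNSKETCHHANDLER * const g_apfnSketch[MY_EXIT_MAX + 1] = { NULL, NULL };
_Static_assert(sizeof(g_apfnSketch) / sizeof(g_apfnSketch[0]) == MY_EXIT_MAX + 1,
               "dispatch table must have one entry per exit reason");

int main(void)
{
    return g_apfnSketch[0] == NULL ? 0 : 1;  /* table is only inspected, never called here */
}
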
1066
1067/**
1068 * Does global VT-x termination (called during module termination).
1069 */
1070VMMR0DECL(void) VMXR0GlobalTerm()
1071{
1072 /* Nothing to do currently. */
1073}
1074
1075
1076/**
1077 * Sets up and activates VT-x on the current CPU.
1078 *
1079 * @returns VBox status code.
1080 * @param pCpu Pointer to the global CPU info struct.
1081 * @param pVM The cross context VM structure. Can be
1082 * NULL after a host resume operation.
1083 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1084 * fEnabledByHost is @c true).
1085 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1086 * @a fEnabledByHost is @c true).
1087 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1088 * enable VT-x on the host.
1089 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1090 */
1091VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1092 void *pvMsrs)
1093{
1094 Assert(pCpu);
1095 Assert(pvMsrs);
1096 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1097
1098 /* Enable VT-x if it's not already enabled by the host. */
1099 if (!fEnabledByHost)
1100 {
1101 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1102 if (RT_FAILURE(rc))
1103 return rc;
1104 }
1105
1106 /*
1107 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been using EPTPs) so
1108 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
1109 */
1110 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1111 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1112 {
1113 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
1114 pCpu->fFlushAsidBeforeUse = false;
1115 }
1116 else
1117 pCpu->fFlushAsidBeforeUse = true;
1118
1119 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1120 ++pCpu->cTlbFlushes;
1121
1122 return VINF_SUCCESS;
1123}
1124
1125
1126/**
1127 * Deactivates VT-x on the current CPU.
1128 *
1129 * @returns VBox status code.
1130 * @param pCpu Pointer to the global CPU info struct.
1131 * @param pvCpuPage Pointer to the VMXON region.
1132 * @param HCPhysCpuPage Physical address of the VMXON region.
1133 *
1134 * @remarks This function should never be called when SUPR0EnableVTx() or
1135 * similar was used to enable VT-x on the host.
1136 */
1137VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1138{
1139 NOREF(pCpu);
1140 NOREF(pvCpuPage);
1141 NOREF(HCPhysCpuPage);
1142
1143 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1144 return hmR0VmxLeaveRootMode();
1145}
1146
1147
1148/**
1149 * Sets the permission bits for the specified MSR in the MSR bitmap.
1150 *
1151 * @param pVCpu The cross context virtual CPU structure.
1152 * @param uMsr The MSR value.
1153 * @param enmRead Whether reading this MSR causes a VM-exit.
1154 * @param enmWrite Whether writing this MSR causes a VM-exit.
1155 */
1156static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1157{
1158 int32_t iBit;
1159 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1160
1161 /*
1162 * Layout:
1163 * 0x000 - 0x3ff - Low MSR read bits
1164 * 0x400 - 0x7ff - High MSR read bits
1165 * 0x800 - 0xbff - Low MSR write bits
1166 * 0xc00 - 0xfff - High MSR write bits
1167 */
1168 if (uMsr <= 0x00001FFF)
1169 iBit = uMsr;
1170 else if (uMsr - UINT32_C(0xC0000000) <= UINT32_C(0x00001FFF))
1171 {
1172 iBit = uMsr - UINT32_C(0xC0000000);
1173 pbMsrBitmap += 0x400;
1174 }
1175 else
1176 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1177
1178 Assert(iBit <= 0x1fff);
1179 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1180 ASMBitSet(pbMsrBitmap, iBit);
1181 else
1182 ASMBitClear(pbMsrBitmap, iBit);
1183
1184 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1185 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1186 else
1187 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1188}
1189
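/*
 * Standalone sketch (not part of HMVMXR0.cpp) of the MSR-bitmap index math
 * documented in hmR0VmxSetMsrPermission() above: low MSRs (0x00000000..0x00001FFF)
 * use the first 1 KB of each read/write half, high MSRs (0xC0000000..0xC0001FFF)
 * the second 1 KB, with write bits living 0x800 bytes after the read bits.
 * IA32_SYSENTER_CS and LSTAR are used merely as familiar example MSR numbers.
 */
#include <stdint.h>
#include <stdio.h>

static int msrToBit(uint32_t uMsr, uint32_t *poffBase)
{
    if (uMsr <= UINT32_C(0x00001FFF))
    {
        *poffBase = 0x000;                             /* low-MSR read bits start here */
        return (int)uMsr;
    }
    if (uMsr - UINT32_C(0xC0000000) <= UINT32_C(0x00001FFF))
    {
        *poffBase = 0x400;                             /* high-MSR read bits start here */
        return (int)(uMsr - UINT32_C(0xC0000000));
    }
    return -1;                                         /* not representable in the bitmap */
}

int main(void)
{
    uint32_t offBase;
    int iBit = msrToBit(0x174 /* IA32_SYSENTER_CS */, &offBase);
    printf("SYSENTER_CS: base=%#x bit=%d (write bits at base+0x800)\n", (unsigned)offBase, iBit);
    iBit = msrToBit(UINT32_C(0xC0000082) /* LSTAR */, &offBase);
    printf("LSTAR:       base=%#x bit=%d (write bits at base+0x800)\n", (unsigned)offBase, iBit);
    return 0;
}
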
1190
1191#ifdef VBOX_STRICT
1192/**
1193 * Gets the permission bits for the specified MSR in the MSR bitmap.
1194 *
1195 * @returns VBox status code.
1196 * @retval VINF_SUCCESS if the specified MSR is found.
1197 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1198 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1199 *
1200 * @param pVCpu The cross context virtual CPU structure.
1201 * @param uMsr The MSR.
1202 * @param penmRead Where to store the read permissions.
1203 * @param penmWrite Where to store the write permissions.
1204 */
1205static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1206{
1207 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1208 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1209 int32_t iBit;
1210 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1211
1212 /* See hmR0VmxSetMsrPermission() for the layout. */
1213 if (uMsr <= 0x00001FFF)
1214 iBit = uMsr;
1215 else if ( uMsr >= 0xC0000000
1216 && uMsr <= 0xC0001FFF)
1217 {
1218 iBit = (uMsr - 0xC0000000);
1219 pbMsrBitmap += 0x400;
1220 }
1221 else
1222 AssertMsgFailedReturn(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr), VERR_NOT_SUPPORTED);
1223
1224 Assert(iBit <= 0x1fff);
1225 if (ASMBitTest(pbMsrBitmap, iBit))
1226 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1227 else
1228 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1229
1230 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1231 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1232 else
1233 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1234 return VINF_SUCCESS;
1235}
1236#endif /* VBOX_STRICT */
1237
1238
1239/**
1240 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1241 * area.
1242 *
1243 * @returns VBox status code.
1244 * @param pVCpu The cross context virtual CPU structure.
1245 * @param cMsrs The number of MSRs.
1246 */
1247DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1248{
1249 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1250 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1251 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1252 {
1253 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1254 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1255 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1256 }
1257
1258 /* Update number of guest MSRs to load/store across the world-switch. */
1259 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
1260 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
1261
1262 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1263 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
1264 AssertRCReturn(rc, rc);
1265
1266 /* Update the VCPU's copy of the MSR count. */
1267 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1268
1269 return VINF_SUCCESS;
1270}
1271
1272
1273/**
1274 * Adds a new (or updates the value of an existing) guest/host MSR
1275 * pair to be swapped during the world-switch as part of the
1276 * auto-load/store MSR area in the VMCS.
1277 *
1278 * @returns VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure.
1280 * @param uMsr The MSR.
1281 * @param uGuestMsrValue Value of the guest MSR.
1282 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1283 * necessary.
1284 * @param pfAddedAndUpdated Where to store whether the MSR was added -and-
1285 * its value was updated. Optional, can be NULL.
1286 */
1287static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
1288 bool *pfAddedAndUpdated)
1289{
1290 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1291 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1292 uint32_t i;
1293 for (i = 0; i < cMsrs; i++)
1294 {
1295 if (pGuestMsr->u32Msr == uMsr)
1296 break;
1297 pGuestMsr++;
1298 }
1299
1300 bool fAdded = false;
1301 if (i == cMsrs)
1302 {
1303 ++cMsrs;
1304 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1305 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
1306
1307 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1308 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1309 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1310
1311 fAdded = true;
1312 }
1313
1314 /* Update the MSR values in the auto-load/store MSR area. */
1315 pGuestMsr->u32Msr = uMsr;
1316 pGuestMsr->u64Value = uGuestMsrValue;
1317
1318 /* Create/update the MSR slot in the host MSR area. */
1319 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1320 pHostMsr += i;
1321 pHostMsr->u32Msr = uMsr;
1322
1323 /*
1324 * Update the host MSR only when requested by the caller AND when we're
1325 * adding it to the auto-load/store area. Otherwise, it would have been
1326 * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
1327 */
1328 bool fUpdatedMsrValue = false;
1329 if ( fAdded
1330 && fUpdateHostMsr)
1331 {
1332 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1333 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1334 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1335 fUpdatedMsrValue = true;
1336 }
1337
1338 if (pfAddedAndUpdated)
1339 *pfAddedAndUpdated = fUpdatedMsrValue;
1340 return VINF_SUCCESS;
1341}
1342
1343
1344/**
1345 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1346 * auto-load/store MSR area in the VMCS.
1347 *
1348 * @returns VBox status code.
1349 * @param pVCpu The cross context virtual CPU structure.
1350 * @param uMsr The MSR.
1351 */
1352static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1353{
1354 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1355 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1356 for (uint32_t i = 0; i < cMsrs; i++)
1357 {
1358 /* Find the MSR. */
1359 if (pGuestMsr->u32Msr == uMsr)
1360 {
1361 /* If it's the last MSR, simply reduce the count. */
1362 if (i == cMsrs - 1)
1363 {
1364 --cMsrs;
1365 break;
1366 }
1367
1368 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1369 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1370 pLastGuestMsr += cMsrs - 1;
1371 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1372 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1373
1374 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1375 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1376 pLastHostMsr += cMsrs - 1;
1377 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1378 pHostMsr->u64Value = pLastHostMsr->u64Value;
1379 --cMsrs;
1380 break;
1381 }
1382 pGuestMsr++;
1383 }
1384
1385 /* Update the VMCS if the count changed (meaning the MSR was found). */
1386 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1387 {
1388 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1389 AssertRCReturn(rc, rc);
1390
1391 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1392 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1393 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1394
1395 Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1396 return VINF_SUCCESS;
1397 }
1398
1399 return VERR_NOT_FOUND;
1400}
1401
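/*
 * Standalone sketch (not part of HMVMXR0.cpp) of the removal strategy used by
 * hmR0VmxRemoveAutoLoadStoreMsr() above: the auto-load/store area is an unordered
 * array, so an entry is removed by copying the last entry over it and shrinking
 * the count -- no shifting required. Types and values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t u32Msr; uint64_t u64Value; } MSRENTRY;

static uint32_t removeMsr(MSRENTRY *paMsrs, uint32_t cMsrs, uint32_t uMsr)
{
    for (uint32_t i = 0; i < cMsrs; i++)
        if (paMsrs[i].u32Msr == uMsr)
        {
            paMsrs[i] = paMsrs[cMsrs - 1];   /* harmless self-copy if it is the last one */
            return cMsrs - 1;
        }
    return cMsrs;                            /* not found: count unchanged */
}

int main(void)
{
    MSRENTRY aMsrs[] = { { 0x174, 1 }, { 0x175, 2 }, { 0x176, 3 } };
    uint32_t cMsrs = removeMsr(aMsrs, 3, 0x175);
    for (uint32_t i = 0; i < cMsrs; i++)
        printf("msr %#x = %llu\n", (unsigned)aMsrs[i].u32Msr, (unsigned long long)aMsrs[i].u64Value);
    return 0;
}
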
1402
1403/**
1404 * Checks if the specified guest MSR is part of the auto-load/store area in
1405 * the VMCS.
1406 *
1407 * @returns true if found, false otherwise.
1408 * @param pVCpu The cross context virtual CPU structure.
1409 * @param uMsr The MSR to find.
1410 */
1411static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1412{
1413 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1414 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1415
1416 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1417 {
1418 if (pGuestMsr->u32Msr == uMsr)
1419 return true;
1420 }
1421 return false;
1422}
1423
1424
1425/**
1426 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1427 *
1428 * @param pVCpu The cross context virtual CPU structure.
1429 *
1430 * @remarks No-long-jump zone!!!
1431 */
1432static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1433{
1434 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1435 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1436 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1437 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1438
1439 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1440 {
1441 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1442
1443 /*
1444 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1445 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1446 */
1447 if (pHostMsr->u32Msr == MSR_K6_EFER)
1448 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1449 else
1450 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1451 }
1452
1453 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1454}
1455
1456
1457#if HC_ARCH_BITS == 64
1458/**
1459 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1460 * perform lazy restoration of the host MSRs while leaving VT-x.
1461 *
1462 * @param pVCpu The cross context virtual CPU structure.
1463 *
1464 * @remarks No-long-jump zone!!!
1465 */
1466static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1467{
1468 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1469
1470 /*
1471 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1472 */
1473 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1474 {
1475 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1476 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1477 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1478 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1479 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1480 }
1481}
1482
1483
1484/**
1485 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1486 * lazily while leaving VT-x.
1487 *
1488 * @returns true if it does, false otherwise.
1489 * @param pVCpu The cross context virtual CPU structure.
1490 * @param uMsr The MSR to check.
1491 */
1492static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1493{
1494 NOREF(pVCpu);
1495 switch (uMsr)
1496 {
1497 case MSR_K8_LSTAR:
1498 case MSR_K6_STAR:
1499 case MSR_K8_SF_MASK:
1500 case MSR_K8_KERNEL_GS_BASE:
1501 return true;
1502 }
1503 return false;
1504}
1505
1506
1507/**
1508 * Saves a set of guest MSRs back into the guest-CPU context.
1509 *
1510 * @param pVCpu The cross context virtual CPU structure.
1511 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1512 * out-of-sync. Make sure to update the required fields
1513 * before using them.
1514 *
1515 * @remarks No-long-jump zone!!!
1516 */
1517static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1518{
1519 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1520 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1521
1522 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1523 {
1524 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1525 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
1526 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
1527 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
1528 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1529 }
1530}
1531
1532
1533/**
 1534 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1535 *
1536 * The name of this function is slightly confusing. This function does NOT
 1537 * postpone loading, but loads the MSRs right now. "hmR0VmxLazy" is simply a
1538 * common prefix for functions dealing with "lazy restoration" of the shared
1539 * MSRs.
1540 *
1541 * @param pVCpu The cross context virtual CPU structure.
1542 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1543 * out-of-sync. Make sure to update the required fields
1544 * before using them.
1545 *
1546 * @remarks No-long-jump zone!!!
1547 */
1548static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1549{
1550 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1551 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1552
1553#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
1554 do { \
1555 if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
1556 ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
1557 else \
1558 Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
1559 } while (0)
1560
1561 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1562 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
1563 {
1564 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
1565 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
1566 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
1567 VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
1568 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1569 }
1570 else
1571 {
1572 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1573 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1574 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1575 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1576 }
1577
1578#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
1579}
1580
1581
1582/**
1583 * Performs lazy restoration of the set of host MSRs if they were previously
1584 * loaded with guest MSR values.
1585 *
1586 * @param pVCpu The cross context virtual CPU structure.
1587 *
1588 * @remarks No-long-jump zone!!!
1589 * @remarks The guest MSRs should have been saved back into the guest-CPU
1590 * context by hmR0VmxSaveGuestLazyMsrs()!!!
1591 */
1592static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1593{
1594 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1595 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1596
1597 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1598 {
1599 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1600 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1601 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1602 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1603 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1604 }
1605 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1606}
1607#endif /* HC_ARCH_BITS == 64 */
1608
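/*
 * Illustrative sketch (not compiled): the intended life cycle of the lazy MSR helpers above,
 * roughly in the order they are meant to be used around guest execution. The surrounding
 * control flow is simplified and hypothetical; only the helper names and the VMX_LAZY_MSRS_*
 * flag transitions are taken from the code above.
 */
#if 0
    hmR0VmxLazySaveHostMsrs(pVCpu);                 /* Caches the host values, sets VMX_LAZY_MSRS_SAVED_HOST. */
    hmR0VmxLazyLoadGuestMsrs(pVCpu, pMixedCtx);     /* Writes the guest values, sets VMX_LAZY_MSRS_LOADED_GUEST. */
    /* ... run guest code; LSTAR/STAR/SF_MASK/KERNEL_GS_BASE are passed through ... */
    hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);     /* Reads the MSRs back into the guest-CPU context. */
    hmR0VmxLazyRestoreHostMsrs(pVCpu);              /* Restores the host values, clears both flags. */
#endif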
1609
1610/**
1611 * Verifies that our cached values of the VMCS controls are all
1612 * consistent with what's actually present in the VMCS.
1613 *
1614 * @returns VBox status code.
1615 * @param pVCpu The cross context virtual CPU structure.
1616 */
1617static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1618{
1619 uint32_t u32Val;
1620 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1621 AssertRCReturn(rc, rc);
1622 AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1623 VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
1624
1625 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1626 AssertRCReturn(rc, rc);
1627 AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1628 VERR_VMX_EXIT_CTLS_CACHE_INVALID);
1629
1630 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1631 AssertRCReturn(rc, rc);
1632 AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1633 VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
1634
1635 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1636 AssertRCReturn(rc, rc);
1637 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1638 VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
1639
1640 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1641 {
1642 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1643 AssertRCReturn(rc, rc);
1644 AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val,
1645 ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1646 VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
1647 }
1648
1649 return VINF_SUCCESS;
1650}
1651
1652
1653#ifdef VBOX_STRICT
1654/**
1655 * Verifies that our cached host EFER value has not changed
1656 * since we cached it.
1657 *
1658 * @param pVCpu The cross context virtual CPU structure.
1659 */
1660static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1661{
1662 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1663
1664 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1665 {
1666 uint64_t u64Val;
1667 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &u64Val);
1668 AssertRC(rc);
1669
1670 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1671 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1672 }
1673}
1674
1675
1676/**
1677 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1678 * VMCS are correct.
1679 *
1680 * @param pVCpu The cross context virtual CPU structure.
1681 */
1682static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1683{
1684 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1685
 1686 /* Verify the MSR counts in the VMCS are what we think they should be. */
1687 uint32_t cMsrs;
1688 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1689 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1690
1691 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1692 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1693
1694 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1695 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1696
1697 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1698 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1699 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1700 {
1701 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1702 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1703 pGuestMsr->u32Msr, cMsrs));
1704
1705 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1706 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1707 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1708
1709 /* Verify that the permissions are as expected in the MSR bitmap. */
1710 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1711 {
1712 VMXMSREXITREAD enmRead;
1713 VMXMSREXITWRITE enmWrite;
1714 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
 1715 AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1716 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1717 {
1718 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1719 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1720 }
1721 else
1722 {
1723 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1724 pGuestMsr->u32Msr, cMsrs));
1725 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1726 pGuestMsr->u32Msr, cMsrs));
1727 }
1728 }
1729 }
1730}
1731#endif /* VBOX_STRICT */
1732
1733
1734/**
1735 * Flushes the TLB using EPT.
1736 *
1737 * @returns VBox status code.
1738 * @param pVCpu The cross context virtual CPU structure of the calling
1739 * EMT. Can be NULL depending on @a enmFlush.
1740 * @param enmFlush Type of flush.
1741 *
1742 * @remarks Caller is responsible for making sure this function is called only
1743 * when NestedPaging is supported and providing @a enmFlush that is
1744 * supported by the CPU.
1745 * @remarks Can be called with interrupts disabled.
1746 */
1747static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXFLUSHEPT enmFlush)
1748{
1749 uint64_t au64Descriptor[2];
1750 if (enmFlush == VMXFLUSHEPT_ALL_CONTEXTS)
1751 au64Descriptor[0] = 0;
1752 else
1753 {
1754 Assert(pVCpu);
1755 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1756 }
1757 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1758
1759 int rc = VMXR0InvEPT(enmFlush, &au64Descriptor[0]);
1760 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1761 rc));
1762 if ( RT_SUCCESS(rc)
1763 && pVCpu)
1764 {
1765 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1766 }
1767}
1768
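/*
 * Illustrative usage sketch (not compiled): an all-contexts INVEPT doesn't need a VCPU since the
 * EPTP in the descriptor isn't used, whereas a single-context flush uses the calling VCPU's EPTP.
 * The pVCpu shown here is hypothetical; the real callers are the tagged-TLB flush handlers below.
 */
#if 0
    hmR0VmxFlushEpt(NULL /* pVCpu */, VMXFLUSHEPT_ALL_CONTEXTS);
    hmR0VmxFlushEpt(pVCpu, VMXFLUSHEPT_SINGLE_CONTEXT);
#endif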
1769
1770/**
1771 * Flushes the TLB using VPID.
1772 *
1773 * @returns VBox status code.
1774 * @param pVM The cross context VM structure.
1775 * @param pVCpu The cross context virtual CPU structure of the calling
1776 * EMT. Can be NULL depending on @a enmFlush.
1777 * @param enmFlush Type of flush.
1778 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1779 * on @a enmFlush).
1780 *
1781 * @remarks Can be called with interrupts disabled.
1782 */
1783static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr)
1784{
1785 NOREF(pVM);
1786 AssertPtr(pVM);
1787 Assert(pVM->hm.s.vmx.fVpid);
1788
1789 uint64_t au64Descriptor[2];
1790 if (enmFlush == VMXFLUSHVPID_ALL_CONTEXTS)
1791 {
1792 au64Descriptor[0] = 0;
1793 au64Descriptor[1] = 0;
1794 }
1795 else
1796 {
1797 AssertPtr(pVCpu);
1798 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1799 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1800 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1801 au64Descriptor[1] = GCPtr;
1802 }
1803
1804 int rc = VMXR0InvVPID(enmFlush, &au64Descriptor[0]); NOREF(rc);
1805 AssertMsg(rc == VINF_SUCCESS,
1806 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1807 if ( RT_SUCCESS(rc)
1808 && pVCpu)
1809 {
1810 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1811 }
1812}
1813
1814
1815/**
1816 * Invalidates a guest page by guest virtual address. Only relevant for
1817 * EPT/VPID, otherwise there is nothing really to invalidate.
1818 *
1819 * @returns VBox status code.
1820 * @param pVM The cross context VM structure.
1821 * @param pVCpu The cross context virtual CPU structure.
1822 * @param GCVirt Guest virtual address of the page to invalidate.
1823 */
1824VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1825{
1826 AssertPtr(pVM);
1827 AssertPtr(pVCpu);
1828 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1829
1830 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1831 if (!fFlushPending)
1832 {
1833 /*
 1834 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
1835 * See @bugref{6043} and @bugref{6177}.
1836 *
1837 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
 1838 * function may be called in a loop with individual addresses.
1839 */
1840 if (pVM->hm.s.vmx.fVpid)
1841 {
1842 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1843 {
1844 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, GCVirt);
1845 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1846 }
1847 else
1848 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1849 }
1850 else if (pVM->hm.s.fNestedPaging)
1851 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1852 }
1853
1854 return VINF_SUCCESS;
1855}
1856
1857
1858/**
1859 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1860 * otherwise there is nothing really to invalidate.
1861 *
1862 * @returns VBox status code.
1863 * @param pVM The cross context VM structure.
1864 * @param pVCpu The cross context virtual CPU structure.
1865 * @param GCPhys Guest physical address of the page to invalidate.
1866 */
1867VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1868{
1869 NOREF(pVM); NOREF(GCPhys);
1870 LogFlowFunc(("%RGp\n", GCPhys));
1871
1872 /*
1873 * We cannot flush a page by guest-physical address. invvpid takes only a linear address while invept only flushes
 1874 * by EPT, not individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*()
 1875 * as this function might be called in a loop; that will cause a flush-by-EPT if EPT is in use. See @bugref{6568}.
1876 */
1877 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1878 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1879 return VINF_SUCCESS;
1880}
1881
1882
1883/**
1884 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1885 * case where neither EPT nor VPID is supported by the CPU.
1886 *
1887 * @param pVM The cross context VM structure.
1888 * @param pVCpu The cross context virtual CPU structure.
1889 * @param pCpu Pointer to the global HM struct.
1890 *
1891 * @remarks Called with interrupts disabled.
1892 */
1893static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1894{
1895 AssertPtr(pVCpu);
1896 AssertPtr(pCpu);
1897 NOREF(pVM);
1898
1899 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1900
1901 Assert(pCpu->idCpu != NIL_RTCPUID);
1902 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1903 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1904 pVCpu->hm.s.fForceTLBFlush = false;
1905 return;
1906}
1907
1908
1909/**
1910 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1911 *
1912 * @param pVM The cross context VM structure.
1913 * @param pVCpu The cross context virtual CPU structure.
1914 * @param pCpu Pointer to the global HM CPU struct.
 1915 * @remarks All references to "ASID" in this function pertain to "VPID" in
 1916 * Intel's nomenclature. The reason is to avoid confusion in compare
1917 * statements since the host-CPU copies are named "ASID".
1918 *
1919 * @remarks Called with interrupts disabled.
1920 */
1921static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1922{
1923#ifdef VBOX_WITH_STATISTICS
1924 bool fTlbFlushed = false;
1925# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1926# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1927 if (!fTlbFlushed) \
1928 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1929 } while (0)
1930#else
1931# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1932# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1933#endif
1934
1935 AssertPtr(pVM);
1936 AssertPtr(pCpu);
1937 AssertPtr(pVCpu);
1938 Assert(pCpu->idCpu != NIL_RTCPUID);
1939
1940 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1941 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1942 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1943
1944 /*
1945 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1946 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1947 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1948 */
1949 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1950 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1951 {
1952 ++pCpu->uCurrentAsid;
1953 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1954 {
1955 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1956 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1957 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1958 }
1959
1960 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1961 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1962 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1963
1964 /*
1965 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1966 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1967 */
1968 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1969 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1970 HMVMX_SET_TAGGED_TLB_FLUSHED();
1971 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1972 }
1973
1974 /* Check for explicit TLB flushes. */
1975 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1976 {
1977 /*
 1978 * Changes to the EPT paging structure by the VMM require flushing by EPT as the CPU creates
1979 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1980 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1981 * but not guest-physical mappings.
1982 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1983 */
1984 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1985 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1986 HMVMX_SET_TAGGED_TLB_FLUSHED();
1987 }
1988
1989 pVCpu->hm.s.fForceTLBFlush = false;
1990 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1991
1992 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1993 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1994 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1995 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1996 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1997 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
1998 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
1999 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2000 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2001
2002 /* Update VMCS with the VPID. */
2003 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2004 AssertRC(rc);
2005
2006#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2007}
2008
2009
2010/**
2011 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2012 *
2013 * @returns VBox status code.
2014 * @param pVM The cross context VM structure.
2015 * @param pVCpu The cross context virtual CPU structure.
2016 * @param pCpu Pointer to the global HM CPU struct.
2017 *
2018 * @remarks Called with interrupts disabled.
2019 */
2020static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2021{
2022 AssertPtr(pVM);
2023 AssertPtr(pVCpu);
2024 AssertPtr(pCpu);
2025 Assert(pCpu->idCpu != NIL_RTCPUID);
2026 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
2027 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
2028
2029 /*
2030 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2031 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2032 */
2033 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2034 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2035 {
2036 pVCpu->hm.s.fForceTLBFlush = true;
2037 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2038 }
2039
2040 /* Check for explicit TLB flushes. */
2041 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2042 {
2043 pVCpu->hm.s.fForceTLBFlush = true;
2044 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2045 }
2046
2047 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2048 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2049
2050 if (pVCpu->hm.s.fForceTLBFlush)
2051 {
2052 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2053 pVCpu->hm.s.fForceTLBFlush = false;
2054 }
2055}
2056
2057
2058/**
2059 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2060 *
2061 * @returns VBox status code.
2062 * @param pVM The cross context VM structure.
2063 * @param pVCpu The cross context virtual CPU structure.
2064 * @param pCpu Pointer to the global HM CPU struct.
2065 *
2066 * @remarks Called with interrupts disabled.
2067 */
2068static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2069{
2070 AssertPtr(pVM);
2071 AssertPtr(pVCpu);
2072 AssertPtr(pCpu);
2073 Assert(pCpu->idCpu != NIL_RTCPUID);
 2074 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with VPID disabled."));
 2075 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging enabled."));
2076
2077 /*
2078 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2079 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
2080 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
2081 */
2082 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2083 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2084 {
2085 pVCpu->hm.s.fForceTLBFlush = true;
2086 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2087 }
2088
2089 /* Check for explicit TLB flushes. */
2090 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2091 {
2092 /*
2093 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
2094 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
 2095 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush too) - an obscure corner case.
2096 */
2097 pVCpu->hm.s.fForceTLBFlush = true;
2098 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2099 }
2100
2101 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2102 if (pVCpu->hm.s.fForceTLBFlush)
2103 {
2104 ++pCpu->uCurrentAsid;
2105 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2106 {
2107 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2108 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2109 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2110 }
2111
2112 pVCpu->hm.s.fForceTLBFlush = false;
2113 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2114 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2115 if (pCpu->fFlushAsidBeforeUse)
2116 {
2117 if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_SINGLE_CONTEXT)
2118 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2119 else if (pVM->hm.s.vmx.enmFlushVpid == VMXFLUSHVPID_ALL_CONTEXTS)
2120 {
2121 hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2122 pCpu->fFlushAsidBeforeUse = false;
2123 }
2124 else
2125 {
2126 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2127 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2128 }
2129 }
2130 }
2131
2132 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2133 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2134 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2135 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2136 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2137 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2138 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2139
2140 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2141 AssertRC(rc);
2142}
2143
2144
2145/**
2146 * Flushes the guest TLB entry based on CPU capabilities.
2147 *
2148 * @param pVCpu The cross context virtual CPU structure.
2149 * @param pCpu Pointer to the global HM CPU struct.
2150 */
2151DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2152{
2153#ifdef HMVMX_ALWAYS_FLUSH_TLB
2154 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2155#endif
2156 PVM pVM = pVCpu->CTX_SUFF(pVM);
2157 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
2158 {
2159 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu, pCpu); break;
2160 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu, pCpu); break;
2161 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu, pCpu); break;
2162 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu, pCpu); break;
2163 default:
2164 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2165 break;
2166 }
2167
2168 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2169}
2170
2171
2172/**
2173 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2174 * TLB entries from the host TLB before VM-entry.
2175 *
2176 * @returns VBox status code.
2177 * @param pVM The cross context VM structure.
2178 */
2179static int hmR0VmxSetupTaggedTlb(PVM pVM)
2180{
2181 /*
2182 * Determine optimal flush type for Nested Paging.
 2183 * We cannot ignore EPT if no suitable flush type is supported by the CPU as we've already set up unrestricted
2184 * guest execution (see hmR3InitFinalizeR0()).
2185 */
2186 if (pVM->hm.s.fNestedPaging)
2187 {
2188 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2189 {
2190 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2191 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_SINGLE_CONTEXT;
2192 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2193 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_ALL_CONTEXTS;
2194 else
2195 {
 2196 /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
2197 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2198 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
2199 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2200 }
2201
2202 /* Make sure the write-back cacheable memory type for EPT is supported. */
2203 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)))
2204 {
2205 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2206 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
2207 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2208 }
2209
2210 /* EPT requires a page-walk length of 4. */
2211 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
2212 {
2213 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2214 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
2215 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2216 }
2217 }
2218 else
2219 {
2220 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2221 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NOT_SUPPORTED;
2222 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
2223 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2224 }
2225 }
2226
2227 /*
2228 * Determine optimal flush type for VPID.
2229 */
2230 if (pVM->hm.s.vmx.fVpid)
2231 {
2232 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2233 {
2234 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2235 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_SINGLE_CONTEXT;
2236 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2237 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_ALL_CONTEXTS;
2238 else
2239 {
 2240 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2241 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2242 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
2243 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2244 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2245 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2246 pVM->hm.s.vmx.fVpid = false;
2247 }
2248 }
2249 else
2250 {
2251 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
 2252 Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
2253 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
2254 pVM->hm.s.vmx.fVpid = false;
2255 }
2256 }
2257
2258 /*
2259 * Setup the handler for flushing tagged-TLBs.
2260 */
2261 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2262 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
2263 else if (pVM->hm.s.fNestedPaging)
2264 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
2265 else if (pVM->hm.s.vmx.fVpid)
2266 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
2267 else
2268 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
2269 return VINF_SUCCESS;
2270}
2271
2272
2273/**
2274 * Sets up pin-based VM-execution controls in the VMCS.
2275 *
2276 * @returns VBox status code.
2277 * @param pVM The cross context VM structure.
2278 * @param pVCpu The cross context virtual CPU structure.
2279 */
2280static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
2281{
2282 AssertPtr(pVM);
2283 AssertPtr(pVCpu);
2284
2285 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2286 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2287
2288 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2289 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2290
2291 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2292 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2293
2294 /* Enable the VMX preemption timer. */
2295 if (pVM->hm.s.vmx.fUsePreemptTimer)
2296 {
2297 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2298 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2299 }
2300
2301 if ((val & zap) != val)
2302 {
2303 LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2304 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));
2305 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2306 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2307 }
2308
2309 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
2310 AssertRCReturn(rc, rc);
2311
2312 pVCpu->hm.s.vmx.u32PinCtls = val;
2313 return rc;
2314}
2315
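/*
 * Illustrative sketch (not compiled): how the VMX capability MSR pair constrains a control
 * value in the val/zap pattern used above and in hmR0VmxSetupProcCtls() below. The helper
 * name and parameters are hypothetical; fDisallowed0 holds the bits that must be 1 and
 * fAllowed1 holds the bits that may be 1.
 */
#if 0
static bool hmR0VmxIsCtlComboValid(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t const val = fDisallowed0 | fDesired;   /* Must-be-one bits are always set. */
    uint32_t const zap = fAllowed1;                 /* Bits outside this mask must stay zero. */
    return (val & zap) == val;                      /* Fails if a desired bit isn't allowed to be 1. */
}
#endif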
2316
2317/**
2318 * Sets up processor-based VM-execution controls in the VMCS.
2319 *
2320 * @returns VBox status code.
2321 * @param pVM The cross context VM structure.
2322 * @param pVCpu The cross context virtual CPU structure.
2323 */
2324static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
2325{
2326 AssertPtr(pVM);
2327 AssertPtr(pVCpu);
2328
2329 int rc = VERR_INTERNAL_ERROR_5;
2330 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2331 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2332
2333 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2334 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2335 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2336 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2337 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2338 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2339 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2340
 2341 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; check that it isn't -always- required to be set or cleared. */
2342 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2343 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2344 {
2345 LogRel(("hmR0VmxSetupProcCtls: Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2346 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2347 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2348 }
2349
2350 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2351 if (!pVM->hm.s.fNestedPaging)
2352 {
2353 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2354 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2355 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2356 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2357 }
2358
2359 /* Use TPR shadowing if supported by the CPU. */
2360 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2361 {
2362 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2363 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2364 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2365 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2366 AssertRCReturn(rc, rc);
2367
2368 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2369 /* CR8 writes cause a VM-exit based on TPR threshold. */
2370 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2371 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2372 }
2373 else
2374 {
2375 /*
2376 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2377 * Set this control only for 64-bit guests.
2378 */
2379 if (pVM->hm.s.fAllow64BitGuests)
2380 {
2381 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2382 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2383 }
2384 }
2385
2386 /* Use MSR-bitmaps if supported by the CPU. */
2387 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2388 {
2389 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2390
2391 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2392 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2393 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2394 AssertRCReturn(rc, rc);
2395
2396 /*
2397 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2398 * automatically using dedicated fields in the VMCS.
2399 */
2400 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2401 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2402 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2403 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2404 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2405
2406#if HC_ARCH_BITS == 64
2407 /*
2408 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2409 */
2410 if (pVM->hm.s.fAllow64BitGuests)
2411 {
2412 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2413 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2414 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2415 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2416 }
2417#endif
2418 }
2419
2420 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2421 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2422 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2423
2424 if ((val & zap) != val)
2425 {
2426 LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
2427 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));
2428 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2429 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2430 }
2431
2432 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
2433 AssertRCReturn(rc, rc);
2434
2435 pVCpu->hm.s.vmx.u32ProcCtls = val;
2436
2437 /*
2438 * Secondary processor-based VM-execution controls.
2439 */
2440 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
2441 {
2442 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2443 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2444
2445 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2446 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
2447
2448 if (pVM->hm.s.fNestedPaging)
2449 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
2450 else
2451 {
2452 /*
2453 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
2454 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
2455 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
2456 */
2457 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2458 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2459 }
2460
2461 if (pVM->hm.s.vmx.fVpid)
2462 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
2463
2464 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2465 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
2466
2467 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
2468 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2469 * done dynamically. */
2470 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2471 {
2472 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2473 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2474 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2475 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2476 AssertRCReturn(rc, rc);
2477 }
2478
2479 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2480 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
2481
2482 if ( pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
2483 && pVM->hm.s.vmx.cPleGapTicks
2484 && pVM->hm.s.vmx.cPleWindowTicks)
2485 {
2486 val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT; /* Enable pause-loop exiting. */
2487
2488 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
2489 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
2490 AssertRCReturn(rc, rc);
2491 }
2492
2493 if ((val & zap) != val)
2494 {
2495 LogRel(("hmR0VmxSetupProcCtls: Invalid secondary processor-based VM-execution controls combo! "
2496 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));
2497 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2498 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2499 }
2500
2501 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
2502 AssertRCReturn(rc, rc);
2503
2504 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
2505 }
2506 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2507 {
 2508 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest enabled when secondary processor-based VM-execution controls are not "
 2509 "available\n"));
2510 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2511 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2512 }
2513
2514 return VINF_SUCCESS;
2515}
2516
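/*
 * Illustrative sketch (not compiled): layout of the 4K MSR bitmap referenced above, as defined
 * by the Intel spec - read bitmap for low MSRs (00000000h..00001FFFh) at offset 0x000, read
 * bitmap for high MSRs (C0000000h..C0001FFFh) at 0x400, and the corresponding write bitmaps at
 * 0x800 and 0xC00. Granting passthru access to a low MSR amounts to clearing its bit in the
 * read and write bitmaps. pbMsrBitmap/pvMsrBitmapPage is a hypothetical mapping of the bitmap
 * page; the real work is done by hmR0VmxSetMsrPermission().
 */
#if 0
    uint8_t *pbMsrBitmap = (uint8_t *)pvMsrBitmapPage;          /* Hypothetical ring-0 mapping of the bitmap page. */
    ASMBitClear(pbMsrBitmap, MSR_IA32_SYSENTER_CS);             /* Reads of this low MSR no longer cause VM-exits. */
    ASMBitClear(pbMsrBitmap + 0x800, MSR_IA32_SYSENTER_CS);     /* Writes of this low MSR no longer cause VM-exits. */
#endif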
2517
2518/**
2519 * Sets up miscellaneous (everything other than Pin & Processor-based
2520 * VM-execution) control fields in the VMCS.
2521 *
2522 * @returns VBox status code.
2523 * @param pVM The cross context VM structure.
2524 * @param pVCpu The cross context virtual CPU structure.
2525 */
2526static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
2527{
2528 NOREF(pVM);
2529 AssertPtr(pVM);
2530 AssertPtr(pVCpu);
2531
2532 int rc = VERR_GENERAL_FAILURE;
2533
2534 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2535#if 0
2536 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestCR3AndCR4())*/
2537 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
2538 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
2539
2540 /*
2541 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
 2542 * and if the X86_XCPT_PF bit in the exception bitmap is set, it causes a VM-exit; if it is clear, no VM-exit occurs.
 2543 * We thus use the exception bitmap alone to control it rather than using both.
2544 */
2545 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
2546 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
2547
2548 /** @todo Explore possibility of using IO-bitmaps. */
2549 /* All IO & IOIO instructions cause VM-exits. */
2550 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
2551 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
2552
2553 /* Initialize the MSR-bitmap area. */
2554 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
2555 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
2556 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
2557 AssertRCReturn(rc, rc);
2558#endif
2559
2560 /* Setup MSR auto-load/store area. */
2561 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2562 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2563 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2564 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2565 AssertRCReturn(rc, rc);
2566
2567 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2568 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2569 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2570 AssertRCReturn(rc, rc);
2571
2572 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2573 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2574 AssertRCReturn(rc, rc);
2575
2576 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2577#if 0
2578 /* Setup debug controls */
2579 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
2580 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2581 AssertRCReturn(rc, rc);
2582#endif
2583
2584 return rc;
2585}
2586
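/*
 * Illustrative sketch (not compiled): because both VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL and
 * VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL above point at the same guest-MSR page, the CPU stores
 * the guest MSR values on VM-exit into the very entries it loads on the next VM-entry. Each
 * entry follows the format mandated by the Intel spec (MSR index plus 64-bit value); the slot
 * index 0 used here is purely an example.
 */
#if 0
    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    pGuestMsr[0].u32Msr   = MSR_K6_EFER;    /* Which MSR this slot swaps (example slot). */
    pGuestMsr[0].u64Value = 0;              /* Loaded on VM-entry, overwritten with the guest value on VM-exit. */
#endif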
2587
2588/**
2589 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2590 *
2591 * @returns VBox status code.
2592 * @param pVM The cross context VM structure.
2593 * @param pVCpu The cross context virtual CPU structure.
2594 */
2595static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
2596{
2597 AssertPtr(pVM);
2598 AssertPtr(pVCpu);
2599
2600 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2601
2602 uint32_t u32XcptBitmap = pVCpu->hm.s.fGIMTrapXcptUD ? RT_BIT(X86_XCPT_UD) : 0;
2603
2604 /* Must always intercept #AC to prevent the guest from hanging the CPU. */
2605 u32XcptBitmap |= RT_BIT_32(X86_XCPT_AC);
2606
2607 /* Because we need to maintain the DR6 state even when intercepting DRx reads
 2608 and writes, and because recursive #DBs can hang the CPU, we must always
2609 intercept #DB. */
2610 u32XcptBitmap |= RT_BIT_32(X86_XCPT_DB);
2611
2612 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2613 if (!pVM->hm.s.fNestedPaging)
2614 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2615
2616 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2617 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2618 AssertRCReturn(rc, rc);
2619 return rc;
2620}
2621
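/*
 * Illustrative sketch (not compiled): each bit in the exception bitmap corresponds to the
 * exception vector of the same number, so additionally trapping, say, #GP for debugging would
 * just be another RT_BIT_32() merged into u32XcptBitmap before the VMXWriteVmcs32() above.
 * This is only an example, not something hmR0VmxInitXcptBitmap() does.
 */
#if 0
    u32XcptBitmap |= RT_BIT_32(X86_XCPT_GP);    /* Every guest #GP would now cause a VM-exit. */
#endif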
2622
2623/**
2624 * Sets up the initial guest-state mask. The guest-state mask is consulted
2625 * before reading guest-state fields from the VMCS as VMREADs can be expensive
 2626 * for the nested virtualization case (as they would cause VM-exits).
2627 *
2628 * @param pVCpu The cross context virtual CPU structure.
2629 */
2630static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2631{
2632 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2633 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);
2634 return VINF_SUCCESS;
2635}
2636
2637
2638/**
2639 * Does per-VM VT-x initialization.
2640 *
2641 * @returns VBox status code.
2642 * @param pVM The cross context VM structure.
2643 */
2644VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2645{
2646 LogFlowFunc(("pVM=%p\n", pVM));
2647
2648 int rc = hmR0VmxStructsAlloc(pVM);
2649 if (RT_FAILURE(rc))
2650 {
2651 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2652 return rc;
2653 }
2654
2655 return VINF_SUCCESS;
2656}
2657
2658
2659/**
2660 * Does per-VM VT-x termination.
2661 *
2662 * @returns VBox status code.
2663 * @param pVM The cross context VM structure.
2664 */
2665VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2666{
2667 LogFlowFunc(("pVM=%p\n", pVM));
2668
2669#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2670 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2671 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2672#endif
2673 hmR0VmxStructsFree(pVM);
2674 return VINF_SUCCESS;
2675}
2676
2677
2678/**
2679 * Sets up the VM for execution under VT-x.
2680 * This function is only called once per-VM during initialization.
2681 *
2682 * @returns VBox status code.
2683 * @param pVM The cross context VM structure.
2684 */
2685VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2686{
2687 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2688 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2689
2690 LogFlowFunc(("pVM=%p\n", pVM));
2691
2692 /*
2693 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2694 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel().
2695 */
2696 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2697 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2698 || !pVM->hm.s.vmx.pRealModeTSS))
2699 {
2700 LogRel(("VMXR0SetupVM: Invalid real-on-v86 state.\n"));
2701 return VERR_INTERNAL_ERROR;
2702 }
2703
 2704 /* Initialize these always, see hmR3InitFinalizeR0(). */
2705 pVM->hm.s.vmx.enmFlushEpt = VMXFLUSHEPT_NONE;
2706 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NONE;
2707
2708 /* Setup the tagged-TLB flush handlers. */
2709 int rc = hmR0VmxSetupTaggedTlb(pVM);
2710 if (RT_FAILURE(rc))
2711 {
2712 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2713 return rc;
2714 }
2715
2716 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2717 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2718#if HC_ARCH_BITS == 64
2719 if ( (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2720 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2721 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2722 {
2723 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2724 }
2725#endif
2726
2727 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
2728 RTCCUINTREG uHostCR4 = ASMGetCR4();
2729 if (RT_UNLIKELY(!(uHostCR4 & X86_CR4_VMXE)))
2730 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
2731
2732 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2733 {
2734 PVMCPU pVCpu = &pVM->aCpus[i];
2735 AssertPtr(pVCpu);
2736 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2737
2738 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2739 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2740
2741 /* Initialize the VM-exit history array with end-of-array markers (UINT16_MAX). */
2742 Assert(!pVCpu->hm.s.idxExitHistoryFree);
2743 HMCPU_EXIT_HISTORY_RESET(pVCpu);
2744
2745 /* Set revision dword at the beginning of the VMCS structure. */
2746 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2747
2748 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2749 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2750 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2751 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2752
2753 /* Load this VMCS as the current VMCS. */
2754 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2755 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2756 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2757
2758 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2759 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2760 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2761
2762 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2763 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2764 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2765
2766 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2767 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2768 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2769
2770 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2771 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2772 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2773
2774 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2775 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2776 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2777
2778#if HC_ARCH_BITS == 32
2779 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2780 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2781 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2782#endif
2783
2784 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2785 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2786 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2787 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2788
2789 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2790
2791 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2792 }
2793
2794 return VINF_SUCCESS;
2795}
2796
2797
2798/**
2799 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2800 * the VMCS.
2801 *
2802 * @returns VBox status code.
2803 * @param pVM The cross context VM structure.
2804 * @param pVCpu The cross context virtual CPU structure.
2805 */
2806DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2807{
2808 NOREF(pVM); NOREF(pVCpu);
2809
2810 RTCCUINTREG uReg = ASMGetCR0();
2811 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2812 AssertRCReturn(rc, rc);
2813
2814 uReg = ASMGetCR3();
2815 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2816 AssertRCReturn(rc, rc);
2817
2818 uReg = ASMGetCR4();
2819 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2820 AssertRCReturn(rc, rc);
2821 return rc;
2822}
2823
2824
2825#if HC_ARCH_BITS == 64
2826/**
2827 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2828 * requirements. See hmR0VmxSaveHostSegmentRegs().
2829 */
2830# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2831 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2832 { \
2833 bool fValidSelector = true; \
2834 if ((selValue) & X86_SEL_LDT) \
2835 { \
2836 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2837 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2838 } \
2839 if (fValidSelector) \
2840 { \
2841 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2842 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2843 } \
2844 (selValue) = 0; \
2845 }
2846#endif
2847
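/*
 * Illustrative expansion sketch (not compiled): for DS the macro above roughly amounts to the
 * following (the LDT-validity check is omitted for brevity). A host selector with a non-zero
 * RPL or the TI bit set would fail the VM-entry host-state checks, so the original value is
 * stashed for manual restoration and zero is what ends up in the VMCS.
 */
#if 0
    if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
    {
        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
        pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
        uSelDS = 0;
    }
#endif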
2848
2849/**
 2850 * Saves the host segment registers and GDTR, IDTR (including the TR, GS and FS bases) into
2851 * the host-state area in the VMCS.
2852 *
2853 * @returns VBox status code.
2854 * @param pVM The cross context VM structure.
2855 * @param pVCpu The cross context virtual CPU structure.
2856 */
2857DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2858{
2859 int rc = VERR_INTERNAL_ERROR_5;
2860
2861#if HC_ARCH_BITS == 64
2862 /*
2863 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2864 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}.
2865 */
2866 AssertMsgReturn(!(pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED),
2867 ("Re-saving host-state after executing guest code without leaving VT-x!\n"), VERR_WRONG_ORDER);
2868#endif
2869
2870 /*
2871 * Host DS, ES, FS and GS segment registers.
2872 */
2873#if HC_ARCH_BITS == 64
2874 RTSEL uSelDS = ASMGetDS();
2875 RTSEL uSelES = ASMGetES();
2876 RTSEL uSelFS = ASMGetFS();
2877 RTSEL uSelGS = ASMGetGS();
2878#else
2879 RTSEL uSelDS = 0;
2880 RTSEL uSelES = 0;
2881 RTSEL uSelFS = 0;
2882 RTSEL uSelGS = 0;
2883#endif
2884
2885 /* Recalculate which host-state bits need to be manually restored. */
2886 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2887
2888 /*
2889 * Host CS and SS segment registers.
2890 */
2891 RTSEL uSelCS = ASMGetCS();
2892 RTSEL uSelSS = ASMGetSS();
2893
2894 /*
2895 * Host TR segment register.
2896 */
2897 RTSEL uSelTR = ASMGetTR();
2898
2899#if HC_ARCH_BITS == 64
2900 /*
 2901 * Determine if the host segment registers are suitable for VT-x. Otherwise load zero so VM-entry succeeds and restore them
2902 * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2903 */
2904 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2905 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2906 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2907 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2908# undef VMXLOCAL_ADJUST_HOST_SEG
2909#endif
2910
2911 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2912 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2913 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2914 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2915 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2916 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2917 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2918 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2919 Assert(uSelCS);
2920 Assert(uSelTR);
2921
 2922 /* The assertion is right, but we would not have updated u32ExitCtls yet. */
2923#if 0
2924 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2925 Assert(uSelSS != 0);
2926#endif
2927
2928 /* Write these host selector fields into the host-state area in the VMCS. */
2929 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
2930 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
2931#if HC_ARCH_BITS == 64
2932 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
2933 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
2934 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
2935 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
2936#else
2937 NOREF(uSelDS);
2938 NOREF(uSelES);
2939 NOREF(uSelFS);
2940 NOREF(uSelGS);
2941#endif
2942 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
2943 AssertRCReturn(rc, rc);
2944
2945 /*
2946 * Host GDTR and IDTR.
2947 */
2948 RTGDTR Gdtr;
2949 RTIDTR Idtr;
2950 RT_ZERO(Gdtr);
2951 RT_ZERO(Idtr);
2952 ASMGetGDTR(&Gdtr);
2953 ASMGetIDTR(&Idtr);
2954 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
2955 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
2956 AssertRCReturn(rc, rc);
2957
2958#if HC_ARCH_BITS == 64
2959 /*
2960 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps them to the
2961 * maximum limit (0xffff) on every VM-exit.
2962 */
2963 if (Gdtr.cbGdt != 0xffff)
2964 {
2965 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
2966 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
2967 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2968 }
2969
2970 /*
2971 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
2972 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore, if the host has the limit set to 0xfff, VT-x
2973 * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insist
2974 * on 0xfff being the limit (Windows Patch Guard) or use the limit for other purposes (darwin puts the CPU ID in there
2975 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on
2976 * hosts where we are pretty sure it won't cause trouble.
2977 */
2978# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
2979 if (Idtr.cbIdt < 0x0fff)
2980# else
2981 if (Idtr.cbIdt != 0xffff)
2982# endif
2983 {
2984 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
2985 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
2986 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
2987 }
2988#endif
2989
2990 /*
2991 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits
2992 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases.
2993 */
2994 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
2995 ("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt),
2996 VERR_VMX_INVALID_HOST_STATE);
2997
2998 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2999#if HC_ARCH_BITS == 64
3000 uintptr_t uTRBase = X86DESC64_BASE(pDesc);
3001
3002 /*
3003 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
3004 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
3005 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
3006 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3007 *
3008 * [1] See Intel spec. 3.5 "System Descriptor Types".
3009 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3010 */
3011 Assert(pDesc->System.u4Type == 11);
3012 if ( pDesc->System.u16LimitLow != 0x67
3013 || pDesc->System.u4LimitHigh)
3014 {
3015 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3016 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
3017 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
3018 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3019 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3020
3021 /* Store the GDTR here as we need it while restoring TR. */
3022 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3023 }
3024#else
3025 NOREF(pVM);
3026 uintptr_t uTRBase = X86DESC_BASE(pDesc);
3027#endif
3028 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3029 AssertRCReturn(rc, rc);
3030
3031 /*
3032 * Host FS base and GS base.
3033 */
3034#if HC_ARCH_BITS == 64
3035 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3036 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3037 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
3038 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
3039 AssertRCReturn(rc, rc);
3040
3041 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3042 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3043 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3044 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3045 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3046#endif
3047 return rc;
3048}
3049
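/*
 * A minimal, self-contained sketch of what the TR base extraction in hmR0VmxSaveHostSegmentRegs()
 * boils down to: a 64-bit system descriptor (TSS/LDT) spreads its base address across four fields,
 * and a macro such as X86DESC64_BASE() stitches them back together. The struct and helper names
 * below are illustrative stand-ins, not part of the VMM headers; only fixed-width integer types
 * are assumed.
 */
typedef struct EXAMPLETSSDESC64
{
    uint16_t u16LimitLow;    /* Limit 15:0. */
    uint16_t u16BaseLow;     /* Base 15:0. */
    uint8_t  u8BaseHigh1;    /* Base 23:16. */
    uint8_t  u8TypeAttrs;    /* Type (11 = busy TSS), DPL, P. */
    uint8_t  u8LimitFlags;   /* Limit 19:16, AVL, G. */
    uint8_t  u8BaseHigh2;    /* Base 31:24. */
    uint32_t u32BaseHigh3;   /* Base 63:32 (second 8 bytes of a 16-byte system descriptor). */
    uint32_t u32Reserved;
} EXAMPLETSSDESC64;

static uint64_t exampleTssDescGetBase(const EXAMPLETSSDESC64 *pDesc)
{
    return (uint64_t)pDesc->u16BaseLow
         | ((uint64_t)pDesc->u8BaseHigh1  << 16)
         | ((uint64_t)pDesc->u8BaseHigh2  << 24)
         | ((uint64_t)pDesc->u32BaseHigh3 << 32);
}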
3050
3051/**
3052 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
3053 * host-state area of the VMCS. These MSRs will be automatically restored on
3054 * the host after every successful VM-exit.
3055 *
3056 * @returns VBox status code.
3057 * @param pVM The cross context VM structure.
3058 * @param pVCpu The cross context virtual CPU structure.
3059 *
3060 * @remarks No-long-jump zone!!!
3061 */
3062DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
3063{
3064 NOREF(pVM);
3065
3066 AssertPtr(pVCpu);
3067 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3068
3069 int rc = VINF_SUCCESS;
3070#if HC_ARCH_BITS == 64
3071 if (pVM->hm.s.fAllow64BitGuests)
3072 hmR0VmxLazySaveHostMsrs(pVCpu);
3073#endif
3074
3075 /*
3076 * Host Sysenter MSRs.
3077 */
3078 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3079#if HC_ARCH_BITS == 32
3080 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3081 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3082#else
3083 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3084 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3085#endif
3086 AssertRCReturn(rc, rc);
3087
3088 /*
3089 * Host EFER MSR.
3090 * If the CPU supports the newer VMCS controls for managing EFER, use them.
3091 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
3092 */
3093 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3094 {
3095 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3096 AssertRCReturn(rc, rc);
3097 }
3098
3099 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
3100 * hmR0VmxLoadGuestExitCtls() !! */
3101
3102 return rc;
3103}
3104
3105
3106/**
3107 * Figures out if we need to swap the EFER MSR which is particularly expensive.
3108 *
3109 * We check all relevant bits. For now, that's everything besides LMA/LME, as
3110 * these two bits are handled by VM-entry, see hmR0VmxLoadGuestExitCtls() and
3111 * hmR0VmxLoadGuestEntryCtls().
3112 *
3113 * @returns true if we need to load guest EFER, false otherwise.
3114 * @param pVCpu The cross context virtual CPU structure.
3115 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3116 * out-of-sync. Make sure to update the required fields
3117 * before using them.
3118 *
3119 * @remarks Requires EFER, CR4.
3120 * @remarks No-long-jump zone!!!
3121 */
3122static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3123{
3124#ifdef HMVMX_ALWAYS_SWAP_EFER
3125 return true;
3126#endif
3127
3128#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
3129 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3130 if (CPUMIsGuestInLongMode(pVCpu))
3131 return false;
3132#endif
3133
3134 PVM pVM = pVCpu->CTX_SUFF(pVM);
3135 uint64_t u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3136 uint64_t u64GuestEfer = pMixedCtx->msrEFER;
3137
3138 /*
3139 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
3140 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
3141 */
3142 if ( CPUMIsGuestInLongMode(pVCpu)
3143 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3144 {
3145 return true;
3146 }
3147
3148 /*
3149 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
3150 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3151 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3152 */
3153 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3154 && (pMixedCtx->cr0 & X86_CR0_PG)
3155 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3156 {
3157 /* Assert that the host is NX capable. */
3158 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3159 return true;
3160 }
3161
3162 /** @todo Check the latest Intel spec. for any other bits,
3163 * like SMEP/SMAP? */
3164 return false;
3165}
3166
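/*
 * A standalone sketch of the EFER-swap decision in hmR0VmxShouldSwapEferMsr() above, using plain
 * integers instead of the VMM context structures. The helper name and the hard-coded bit masks are
 * illustrative stand-ins (EFER.SCE = bit 0, EFER.NXE = bit 11, CR0.PG = bit 31, CR4.PAE = bit 5).
 */
static bool exampleShouldSwapEfer(uint64_t uHostEfer, uint64_t uGuestEfer,
                                  uint64_t uGuestCr0, uint64_t uGuestCr4, bool fGuestLongMode)
{
    uint64_t const fSce = UINT64_C(0x00000001);      /* EFER.SCE - SYSCALL enable. */
    uint64_t const fNxe = UINT64_C(0x00000800);      /* EFER.NXE - no-execute enable. */
    uint64_t const fPg  = UINT64_C(0x80000000);      /* CR0.PG   - paging. */
    uint64_t const fPae = UINT64_C(0x00000020);      /* CR4.PAE  - PAE paging. */

    /* Differing SYSCALL-enable bits would break the long-mode guest's SYSCALL/SYSRET. */
    if (fGuestLongMode && (uGuestEfer & fSce) != (uHostEfer & fSce))
        return true;

    /* NXE changes how bit 63 of PAE/long-mode page-table entries is interpreted. */
    if (   (uGuestCr4 & fPae)
        && (uGuestCr0 & fPg)
        && (uGuestEfer & fNxe) != (uHostEfer & fNxe))
        return true;

    return false;
}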
3167
3168/**
3169 * Sets up VM-entry controls in the VMCS. These controls can affect things done
3170 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
3171 * controls".
3172 *
3173 * @returns VBox status code.
3174 * @param pVCpu The cross context virtual CPU structure.
3175 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3176 * out-of-sync. Make sure to update the required fields
3177 * before using them.
3178 *
3179 * @remarks Requires EFER.
3180 * @remarks No-long-jump zone!!!
3181 */
3182DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3183{
3184 int rc = VINF_SUCCESS;
3185 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS))
3186 {
3187 PVM pVM = pVCpu->CTX_SUFF(pVM);
3188 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3189 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3190
3191 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3192 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3193
3194 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3195 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3196 {
3197 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3198 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));
3199 }
3200 else
3201 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3202
3203 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use them. */
3204 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3205 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3206 {
3207 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3208 Log4(("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));
3209 }
3210
3211 /*
3212 * The following should -not- be set (since we're not in SMM mode):
3213 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3214 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3215 */
3216
3217 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3218 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3219
3220 if ((val & zap) != val)
3221 {
3222 LogRel(("hmR0VmxLoadGuestEntryCtls: Invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3223 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));
3224 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3225 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3226 }
3227
3228 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
3229 AssertRCReturn(rc, rc);
3230
3231 pVCpu->hm.s.vmx.u32EntryCtls = val;
3232 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS);
3233 }
3234 return rc;
3235}
3236
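/*
 * A standalone sketch of the control-sanitization pattern used by hmR0VmxLoadGuestEntryCtls()
 * above and hmR0VmxLoadGuestExitCtls() below: a VMX capability MSR reports must-be-1 bits
 * ("disallowed zero") in its low dword and may-be-1 bits ("allowed one") in its high dword.
 * The helper name is an illustrative stand-in.
 */
static bool exampleBuildVmxCtls(uint64_t uCapMsr, uint32_t fDesired, uint32_t *puResult)
{
    uint32_t const fMustBeOne = (uint32_t)uCapMsr;          /* Low dword: bits that must be set. */
    uint32_t const fMayBeOne  = (uint32_t)(uCapMsr >> 32);  /* High dword: bits that may be set. */

    uint32_t uVal = fDesired | fMustBeOne;  /* Force the required bits on. */
    if ((uVal & fMayBeOne) != uVal)         /* Some desired bit isn't supported by this CPU. */
        return false;
    *puResult = uVal;
    return true;
}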
3237
3238/**
3239 * Sets up the VM-exit controls in the VMCS.
3240 *
3241 * @returns VBox status code.
3242 * @param pVCpu The cross context virtual CPU structure.
3243 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3244 * out-of-sync. Make sure to update the required fields
3245 * before using them.
3246 *
3247 * @remarks Requires EFER.
3248 */
3249DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3250{
3251 NOREF(pMixedCtx);
3252
3253 int rc = VINF_SUCCESS;
3254 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
3255 {
3256 PVM pVM = pVCpu->CTX_SUFF(pVM);
3257 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3258 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3259
3260 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3261 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3262
3263 /*
3264 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3265 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
3266 */
3267#if HC_ARCH_BITS == 64
3268 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3269 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3270#else
3271 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3272 {
3273 /* The switcher returns to long mode, EFER is managed by the switcher. */
3274 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3275 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
3276 }
3277 else
3278 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3279#endif
3280
3281 /* If the newer VMCS fields for managing EFER exist, use them. */
3282 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3283 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3284 {
3285 val |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3286 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3287 Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));
3288 }
3289
3290 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3291 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3292
3293 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3294 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3295 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3296
3297 if ( pVM->hm.s.vmx.fUsePreemptTimer
3298 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
3299 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3300
3301 if ((val & zap) != val)
3302 {
3303 LogRel(("hmR0VmxLoadGuestExitCtls: Invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
3304 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));
3305 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3306 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3307 }
3308
3309 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
3310 AssertRCReturn(rc, rc);
3311
3312 pVCpu->hm.s.vmx.u32ExitCtls = val;
3313 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_EXIT_CTLS);
3314 }
3315 return rc;
3316}
3317
3318
3319/**
3320 * Loads the guest APIC and related state.
3321 *
3322 * @returns VBox status code.
3323 * @param pVCpu The cross context virtual CPU structure.
3324 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3325 * out-of-sync. Make sure to update the required fields
3326 * before using them.
3327 */
3328DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3329{
3330 NOREF(pMixedCtx);
3331
3332 int rc = VINF_SUCCESS;
3333 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
3334 {
3335 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
3336 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3337 {
3338 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3339
3340 bool fPendingIntr = false;
3341 uint8_t u8Tpr = 0;
3342 uint8_t u8PendingIntr = 0;
3343 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3344 AssertRCReturn(rc, rc);
3345
3346 /*
3347 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
3348 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
3349 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
3350 * the interrupt when we VM-exit for other reasons.
3351 */
3352 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
3353 uint32_t u32TprThreshold = 0;
3354 if (fPendingIntr)
3355 {
3356 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3357 const uint8_t u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
3358 const uint8_t u8TprPriority = (u8Tpr >> 4) & 0xf;
3359 if (u8PendingPriority <= u8TprPriority)
3360 u32TprThreshold = u8PendingPriority;
3361 else
3362 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
3363 }
3364 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3365
3366 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3367 AssertRCReturn(rc, rc);
3368 }
3369
3370 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
3371 }
3372 return rc;
3373}
3374
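/*
 * A tiny standalone sketch of the TPR-threshold computation in hmR0VmxLoadGuestApicState() above
 * (illustrative helper name). Only the task-priority class (bits 7:4) matters, and the threshold
 * is capped at the pending interrupt's priority class so that the guest lowering its TPR well
 * below the pending priority still causes only a single TPR-below-threshold VM-exit.
 */
static uint32_t exampleTprThreshold(uint8_t u8Tpr, uint8_t u8PendingIntr, bool fPendingIntr)
{
    if (!fPendingIntr)
        return 0;   /* Nothing pending: never VM-exit on TPR writes. */
    uint8_t const u8PendingPriority = (u8PendingIntr >> 4) & 0xf;
    uint8_t const u8TprPriority     = (u8Tpr         >> 4) & 0xf;
    return u8PendingPriority <= u8TprPriority ? u8PendingPriority : u8TprPriority;
}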
3375
3376/**
3377 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3378 *
3379 * @returns Guest's interruptibility-state.
3380 * @param pVCpu The cross context virtual CPU structure.
3381 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3382 * out-of-sync. Make sure to update the required fields
3383 * before using them.
3384 *
3385 * @remarks No-long-jump zone!!!
3386 */
3387DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3388{
3389 /*
3390 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3391 */
3392 uint32_t uIntrState = 0;
3393 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3394 {
3395 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
3396 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS),
3397 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu)));
3398 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3399 {
3400 if (pMixedCtx->eflags.Bits.u1IF)
3401 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3402 else
3403 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3404 }
3405 /* else: Although we can clear the force-flag here, let's keep this side-effect free. */
3406 }
3407
3408 /*
3409 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3410 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3411 * setting this would block host-NMIs and IRET will not clear the blocking.
3412 *
3413 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3414 */
3415 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3416 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3417 {
3418 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3419 }
3420
3421 return uIntrState;
3422}
3423
3424
3425/**
3426 * Loads the guest's interruptibility-state into the guest-state area in the
3427 * VMCS.
3428 *
3429 * @returns VBox status code.
3430 * @param pVCpu The cross context virtual CPU structure.
3431 * @param uIntrState The interruptibility-state to set.
3432 */
3433static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
3434{
3435 NOREF(pVCpu);
3436 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
3437 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3438 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
3439 AssertRC(rc);
3440 return rc;
3441}
3442
3443
3444/**
3445 * Loads the exception intercepts required for guest execution in the VMCS.
3446 *
3447 * @returns VBox status code.
3448 * @param pVCpu The cross context virtual CPU structure.
3449 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3450 * out-of-sync. Make sure to update the required fields
3451 * before using them.
3452 */
3453static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3454{
3455 NOREF(pMixedCtx);
3456 int rc = VINF_SUCCESS;
3457 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
3458 {
3459 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */
3460 if (pVCpu->hm.s.fGIMTrapXcptUD)
3461 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD);
3462#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3463 else
3464 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3465#endif
3466
3467 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
3468 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
3469
3470 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3471 AssertRCReturn(rc, rc);
3472
3473 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3474 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu,
3475 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu)));
3476 }
3477 return rc;
3478}
3479
3480
3481/**
3482 * Loads the guest's RIP into the guest-state area in the VMCS.
3483 *
3484 * @returns VBox status code.
3485 * @param pVCpu The cross context virtual CPU structure.
3486 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3487 * out-of-sync. Make sure to update the required fields
3488 * before using them.
3489 *
3490 * @remarks No-long-jump zone!!!
3491 */
3492static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3493{
3494 int rc = VINF_SUCCESS;
3495 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))
3496 {
3497 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3498 AssertRCReturn(rc, rc);
3499
3500 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);
3501 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,
3502 HMCPU_CF_VALUE(pVCpu)));
3503 }
3504 return rc;
3505}
3506
3507
3508/**
3509 * Loads the guest's RSP into the guest-state area in the VMCS.
3510 *
3511 * @returns VBox status code.
3512 * @param pVCpu The cross context virtual CPU structure.
3513 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3514 * out-of-sync. Make sure to update the required fields
3515 * before using them.
3516 *
3517 * @remarks No-long-jump zone!!!
3518 */
3519static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3520{
3521 int rc = VINF_SUCCESS;
3522 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP))
3523 {
3524 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3525 AssertRCReturn(rc, rc);
3526
3527 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP);
3528 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp));
3529 }
3530 return rc;
3531}
3532
3533
3534/**
3535 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
3536 *
3537 * @returns VBox status code.
3538 * @param pVCpu The cross context virtual CPU structure.
3539 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3540 * out-of-sync. Make sure to update the required fields
3541 * before using them.
3542 *
3543 * @remarks No-long-jump zone!!!
3544 */
3545static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3546{
3547 int rc = VINF_SUCCESS;
3548 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
3549 {
3550 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3551 Let us assert it as such and use 32-bit VMWRITE. */
3552 Assert(!(pMixedCtx->rflags.u64 >> 32));
3553 X86EFLAGS Eflags = pMixedCtx->eflags;
3554 /** @todo r=bird: There shall be no need to OR in X86_EFL_1 here, nor
3555 * shall there be any reason for clearing bits 63:22, 15, 5 and 3.
3556 * These will never be cleared/set, unless some other part of the VMM
3557 * code is buggy - in which case we're better off finding and fixing
3558 * those bugs than hiding them. */
3559 Assert(Eflags.u32 & X86_EFL_RA1_MASK);
3560 Assert(!(Eflags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3561 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
3562 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
3563
3564 /*
3565 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit.
3566 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
3567 */
3568 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3569 {
3570 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3571 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3572 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
3573 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3574 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3575 }
3576
3577 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
3578 AssertRCReturn(rc, rc);
3579
3580 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS);
3581 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32));
3582 }
3583 return rc;
3584}
3585
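/*
 * A standalone sketch of the RFLAGS massaging done by hmR0VmxLoadGuestRflags() above for the
 * real-on-v86 case. The helper name and the plain hex masks stand in for the VMX_EFLAGS_RESERVED_*
 * constants and X86EFLAGS bit-fields used by the real code.
 */
static uint32_t exampleRealOnV86Eflags(uint32_t fEflags)
{
    fEflags &= UINT32_C(0x003f7fd7);    /* Clear bits 31:22, 15, 5 and 3 (reserved, MBZ). */
    fEflags |= UINT32_C(0x00000002);    /* Bit 1 is reserved and must be 1. */
    fEflags |= UINT32_C(0x00020000);    /* EFLAGS.VM: run the code as virtual-8086. */
    fEflags &= ~UINT32_C(0x00003000);   /* EFLAGS.IOPL = 0 so privileged instructions fault. */
    return fEflags;
}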
3586
3587/**
3588 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
3589 *
3590 * @returns VBox status code.
3591 * @param pVCpu The cross context virtual CPU structure.
3592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3593 * out-of-sync. Make sure to update the required fields
3594 * before using them.
3595 *
3596 * @remarks No-long-jump zone!!!
3597 */
3598DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3599{
3600 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
3601 rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
3602 rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
3603 AssertRCReturn(rc, rc);
3604 return rc;
3605}
3606
3607
3608/**
3609 * Loads the guest CR0 control register into the guest-state area in the VMCS.
3610 * CR0 is partially shared with the host and we have to consider the FPU bits.
3611 *
3612 * @returns VBox status code.
3613 * @param pVCpu The cross context virtual CPU structure.
3614 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3615 * out-of-sync. Make sure to update the required fields
3616 * before using them.
3617 *
3618 * @remarks No-long-jump zone!!!
3619 */
3620static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3621{
3622 /*
3623 * Guest CR0.
3624 * Guest FPU.
3625 */
3626 int rc = VINF_SUCCESS;
3627 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
3628 {
3629 Assert(!(pMixedCtx->cr0 >> 32));
3630 uint32_t u32GuestCR0 = pMixedCtx->cr0;
3631 PVM pVM = pVCpu->CTX_SUFF(pVM);
3632
3633 /* The guest's view (read access) of its CR0 is unblemished. */
3634 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
3635 AssertRCReturn(rc, rc);
3636 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0));
3637
3638 /* Setup VT-x's view of the guest CR0. */
3639 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
3640 if (pVM->hm.s.fNestedPaging)
3641 {
3642 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3643 {
3644 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3645 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3646 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3647 }
3648 else
3649 {
3650 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3651 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3652 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3653 }
3654
3655 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3656 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3657 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3658
3659 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3660 AssertRCReturn(rc, rc);
3661 }
3662 else
3663 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3664
3665 /*
3666 * Guest FPU bits.
3667 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
3668 * CPUs to support VT-x; it makes no mention of relaxing this for unrestricted execution (UX) in the VM-entry checks.
3669 */
3670 u32GuestCR0 |= X86_CR0_NE;
3671 bool fInterceptNM = false;
3672 if (CPUMIsGuestFPUStateActive(pVCpu))
3673 {
3674 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3675 /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
3676 We're only concerned about -us- not intercepting #NMs when the guest-FPU is active. Not the guest itself! */
3677 }
3678 else
3679 {
3680 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3681 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3682 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3683 }
3684
3685 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3686 bool fInterceptMF = false;
3687 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3688 fInterceptMF = true;
3689
3690 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3691 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3692 {
3693 Assert(PDMVmmDevHeapIsEnabled(pVM));
3694 Assert(pVM->hm.s.vmx.pRealModeTSS);
3695 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3696 fInterceptNM = true;
3697 fInterceptMF = true;
3698 }
3699 else
3700 {
3701 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
3702 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3703 }
3704 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
3705
3706 if (fInterceptNM)
3707 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3708 else
3709 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3710
3711 if (fInterceptMF)
3712 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3713 else
3714 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3715
3716 /* Additional intercepts for debugging, define these yourself explicitly. */
3717#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3718 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3719 | RT_BIT(X86_XCPT_BP)
3720 | RT_BIT(X86_XCPT_DE)
3721 | RT_BIT(X86_XCPT_NM)
3722 | RT_BIT(X86_XCPT_TS)
3723 | RT_BIT(X86_XCPT_UD)
3724 | RT_BIT(X86_XCPT_NP)
3725 | RT_BIT(X86_XCPT_SS)
3726 | RT_BIT(X86_XCPT_GP)
3727 | RT_BIT(X86_XCPT_PF)
3728 | RT_BIT(X86_XCPT_MF)
3729 ;
3730#elif defined(HMVMX_ALWAYS_TRAP_PF)
3731 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3732#endif
3733
3734 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3735
3736 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3737 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3738 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3739 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3740 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3741 else
3742 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3743
3744 u32GuestCR0 |= uSetCR0;
3745 u32GuestCR0 &= uZapCR0;
3746 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3747
3748 /* Write VT-x's view of the guest CR0 into the VMCS. */
3749 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3750 AssertRCReturn(rc, rc);
3751 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0,
3752 uZapCR0));
3753
3754 /*
3755 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3756 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3757 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3758 */
3759 uint32_t u32CR0Mask = 0;
3760 u32CR0Mask = X86_CR0_PE
3761 | X86_CR0_NE
3762 | X86_CR0_WP
3763 | X86_CR0_PG
3764 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3765 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3766 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3767
3768 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3769 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3770 * and @bugref{6944}. */
3771#if 0
3772 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3773 u32CR0Mask &= ~X86_CR0_PE;
3774#endif
3775 if (pVM->hm.s.fNestedPaging)
3776 u32CR0Mask &= ~X86_CR0_WP;
3777
3778 /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
3779 if (fInterceptNM)
3780 {
3781 u32CR0Mask |= X86_CR0_TS
3782 | X86_CR0_MP;
3783 }
3784
3785 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3786 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3787 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3788 AssertRCReturn(rc, rc);
3789 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask));
3790
3791 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
3792 }
3793 return rc;
3794}
3795
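/*
 * A standalone sketch of the fixed-CR0 handling in hmR0VmxLoadSharedCR0() above (illustrative
 * helper name). The IA32_VMX_CR0_FIXED0/FIXED1 MSR pair constrains CR0 while in VMX operation;
 * the unrestricted-guest exception for PE/PG is omitted here for brevity.
 */
static uint32_t exampleApplyFixedCr0(uint32_t uGuestCr0, uint64_t uFixed0Msr, uint64_t uFixed1Msr)
{
    uint32_t const fSetCr0 = (uint32_t)(uFixed0Msr & uFixed1Msr);   /* Bits that must be 1. */
    uint32_t const fZapCr0 = (uint32_t)(uFixed0Msr | uFixed1Msr);   /* Bits that may be 1. */
    return (uGuestCr0 | fSetCr0) & fZapCr0;
}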
3796
3797/**
3798 * Loads the guest control registers (CR3, CR4) into the guest-state area
3799 * in the VMCS.
3800 *
3801 * @returns VBox status code.
3802 * @param pVCpu The cross context virtual CPU structure.
3803 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3804 * out-of-sync. Make sure to update the required fields
3805 * before using them.
3806 *
3807 * @remarks No-long-jump zone!!!
3808 */
3809static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3810{
3811 int rc = VINF_SUCCESS;
3812 PVM pVM = pVCpu->CTX_SUFF(pVM);
3813
3814 /*
3815 * Guest CR2.
3816 * It's always loaded in the assembler code. Nothing to do here.
3817 */
3818
3819 /*
3820 * Guest CR3.
3821 */
3822 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))
3823 {
3824 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3825 if (pVM->hm.s.fNestedPaging)
3826 {
3827 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3828
3829 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3830 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3831 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3832 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3833
3834 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3835 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3836 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
3837
3838 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3839 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page-walk length - 1) must be 3. */
3840 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 11:7 MBZ. */
3841 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3842 AssertMsg( !((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
3843 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
3844 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3845
3846 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3847 AssertRCReturn(rc, rc);
3848 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));
3849
3850 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3851 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3852 {
3853 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3854 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3855 {
3856 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
3857 AssertRCReturn(rc, rc);
3858 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
3859 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
3860 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
3861 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
3862 AssertRCReturn(rc, rc);
3863 }
3864
3865 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3866 have Unrestricted Execution to handle the guest when it's not using paging. */
3867 GCPhysGuestCR3 = pMixedCtx->cr3;
3868 }
3869 else
3870 {
3871 /*
3872 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3873 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3874 * EPT takes care of translating it to host-physical addresses.
3875 */
3876 RTGCPHYS GCPhys;
3877 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3878 Assert(PDMVmmDevHeapIsEnabled(pVM));
3879
3880 /* We obtain it here every time as the guest could have relocated this PCI region. */
3881 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3882 AssertRCReturn(rc, rc);
3883
3884 GCPhysGuestCR3 = GCPhys;
3885 }
3886
3887 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));
3888 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3889 }
3890 else
3891 {
3892 /* Non-nested paging case, just use the hypervisor's CR3. */
3893 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3894
3895 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));
3896 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3897 }
3898 AssertRCReturn(rc, rc);
3899
3900 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3);
3901 }
3902
3903 /*
3904 * Guest CR4.
3905 * ASSUMES this is done every time we get in from ring-3! (XCR0)
3906 */
3907 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))
3908 {
3909 Assert(!(pMixedCtx->cr4 >> 32));
3910 uint32_t u32GuestCR4 = pMixedCtx->cr4;
3911
3912 /* The guest's view of its CR4 is unblemished. */
3913 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3914 AssertRCReturn(rc, rc);
3915 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4));
3916
3917 /* Setup VT-x's view of the guest CR4. */
3918 /*
3919 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3920 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3921 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3922 */
3923 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3924 {
3925 Assert(pVM->hm.s.vmx.pRealModeTSS);
3926 Assert(PDMVmmDevHeapIsEnabled(pVM));
3927 u32GuestCR4 &= ~X86_CR4_VME;
3928 }
3929
3930 if (pVM->hm.s.fNestedPaging)
3931 {
3932 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
3933 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3934 {
3935 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3936 u32GuestCR4 |= X86_CR4_PSE;
3937 /* Our identity mapping is a 32-bit page directory. */
3938 u32GuestCR4 &= ~X86_CR4_PAE;
3939 }
3940 /* else use guest CR4.*/
3941 }
3942 else
3943 {
3944 /*
3945 * The shadow paging modes and guest paging modes are different, the shadow is in accordance with the host
3946 * paging mode and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
3947 */
3948 switch (pVCpu->hm.s.enmShadowMode)
3949 {
3950 case PGMMODE_REAL: /* Real-mode. */
3951 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3952 case PGMMODE_32_BIT: /* 32-bit paging. */
3953 {
3954 u32GuestCR4 &= ~X86_CR4_PAE;
3955 break;
3956 }
3957
3958 case PGMMODE_PAE: /* PAE paging. */
3959 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3960 {
3961 u32GuestCR4 |= X86_CR4_PAE;
3962 break;
3963 }
3964
3965 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3966 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3967#ifdef VBOX_ENABLE_64_BITS_GUESTS
3968 break;
3969#endif
3970 default:
3971 AssertFailed();
3972 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3973 }
3974 }
3975
3976 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3977 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
3978 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
3979 u32GuestCR4 |= uSetCR4;
3980 u32GuestCR4 &= uZapCR4;
3981
3982 /* Write VT-x's view of the guest CR4 into the VMCS. */
3983 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));
3984 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3985 AssertRCReturn(rc, rc);
3986
3987 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
3988 uint32_t u32CR4Mask = X86_CR4_VME
3989 | X86_CR4_PAE
3990 | X86_CR4_PGE
3991 | X86_CR4_PSE
3992 | X86_CR4_VMXE;
3993 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
3994 u32CR4Mask |= X86_CR4_OSXSAVE;
3995 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
3996 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3997 AssertRCReturn(rc, rc);
3998
3999 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
4000 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
4001
4002 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4);
4003 }
4004 return rc;
4005}
4006
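/*
 * A standalone sketch of the EPTP construction in hmR0VmxLoadGuestCR3AndCR4() above (illustrative
 * helper name). The EPT pointer is the 4K-aligned physical address of the top-level table with a
 * few attribute bits folded into the low bits: memory type in bits 2:0 (6 = write-back), the
 * page-walk length minus one in bits 5:3, and optionally the accessed/dirty enable in bit 6.
 */
static uint64_t exampleBuildEptp(uint64_t uPml4PhysAddr, bool fAccessDirty)
{
    uint64_t uEptp = uPml4PhysAddr & ~UINT64_C(0xfff);  /* Must be 4K aligned. */
    uEptp |= UINT64_C(6);               /* Bits 2:0: memory type, 6 = write-back (WB). */
    uEptp |= UINT64_C(3) << 3;          /* Bits 5:3: page-walk length - 1, i.e. a 4-level walk. */
    if (fAccessDirty)
        uEptp |= UINT64_C(1) << 6;      /* Bit 6: EPT accessed/dirty flags, if the CPU supports them. */
    return uEptp;
}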
4007
4008/**
4009 * Loads the guest debug registers into the guest-state area in the VMCS.
4010 *
4011 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
4012 *
4013 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4014 *
4015 * @returns VBox status code.
4016 * @param pVCpu The cross context virtual CPU structure.
4017 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4018 * out-of-sync. Make sure to update the required fields
4019 * before using them.
4020 *
4021 * @remarks No-long-jump zone!!!
4022 */
4023static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4024{
4025 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
4026 return VINF_SUCCESS;
4027
4028#ifdef VBOX_STRICT
4029 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4030 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4031 {
4032 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4033 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4034 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4035 }
4036#endif
4037
4038 int rc;
4039 PVM pVM = pVCpu->CTX_SUFF(pVM);
4040 bool fSteppingDB = false;
4041 bool fInterceptMovDRx = false;
4042 if (pVCpu->hm.s.fSingleInstruction)
4043 {
4044 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4045 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4046 {
4047 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4048 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4049 AssertRCReturn(rc, rc);
4050 Assert(fSteppingDB == false);
4051 }
4052 else
4053 {
4054 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4055 pVCpu->hm.s.fClearTrapFlag = true;
4056 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
4057 fSteppingDB = true;
4058 }
4059 }
4060
4061 if ( fSteppingDB
4062 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4063 {
4064 /*
4065 * Use the combined guest and host DRx values found in the hypervisor
4066 * register set because the debugger has breakpoints active or someone
4067 * is single stepping on the host side without a monitor trap flag.
4068 *
4069 * Note! DBGF expects a clean DR6 state before executing guest code.
4070 */
4071#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4072 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4073 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4074 {
4075 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4076 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4077 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4078 }
4079 else
4080#endif
4081 if (!CPUMIsHyperDebugStateActive(pVCpu))
4082 {
4083 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4084 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4085 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4086 }
4087
4088 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
4089 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
4090 AssertRCReturn(rc, rc);
4091
4092 pVCpu->hm.s.fUsingHyperDR7 = true;
4093 fInterceptMovDRx = true;
4094 }
4095 else
4096 {
4097 /*
4098 * If the guest has enabled debug registers, we need to load them prior to
4099 * executing guest code so they'll trigger at the right time.
4100 */
4101 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
4102 {
4103#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4104 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4105 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4106 {
4107 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4108 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4109 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4110 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4111 }
4112 else
4113#endif
4114 if (!CPUMIsGuestDebugStateActive(pVCpu))
4115 {
4116 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4117 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4118 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4119 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4120 }
4121 Assert(!fInterceptMovDRx);
4122 }
4123 /*
4124 * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4125 * must intercept #DB in order to maintain a correct DR6 guest value, and
4126 * because we need to intercept it to prevent nested #DBs from hanging the
4127 * CPU, we end up always having to intercept it. See hmR0VmxInitXcptBitmap.
4128 */
4129#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4130 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4131 && !CPUMIsGuestDebugStateActive(pVCpu))
4132#else
4133 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4134#endif
4135 {
4136 fInterceptMovDRx = true;
4137 }
4138
4139 /* Update guest DR7. */
4140 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
4141 AssertRCReturn(rc, rc);
4142
4143 pVCpu->hm.s.fUsingHyperDR7 = false;
4144 }
4145
4146 /*
4147 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
4148 */
4149 if (fInterceptMovDRx)
4150 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4151 else
4152 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4153 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
4154 AssertRCReturn(rc, rc);
4155
4156 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);
4157 return VINF_SUCCESS;
4158}
4159
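/*
 * A condensed standalone sketch of the MOV DRx interception decision in hmR0VmxLoadSharedDebugState()
 * above (illustrative helper name). The hypervisor's debug state wins when DBGF is stepping or has
 * armed breakpoints; otherwise the guest's DR7 decides whether its debug registers are loaded for
 * real, and lazily-loaded DR0-3 require intercepting MOV DRx until the first access.
 */
static bool exampleInterceptMovDRx(bool fHyperStepping, bool fHyperDr7Armed,
                                   bool fGuestDr7Armed, bool fGuestDebugRegsLoaded)
{
    if (fHyperStepping || fHyperDr7Armed)
        return true;                    /* Hypervisor DRx values are live: hide them from the guest. */
    if (fGuestDr7Armed)
        return false;                   /* Guest DRx values are loaded for real: no need to intercept. */
    return !fGuestDebugRegsLoaded;      /* Lazy DR0-3: intercept until they're actually loaded. */
}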
4160
4161#ifdef VBOX_STRICT
4162/**
4163 * Strict function to validate segment registers.
4164 *
4165 * @remarks ASSUMES CR0 is up to date.
4166 */
4167static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4168{
4169 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4170 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg()
4171 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
4172 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4173 && ( !CPUMIsGuestInRealModeEx(pCtx)
4174 && !CPUMIsGuestInV86ModeEx(pCtx)))
4175 {
4176 /* Protected mode checks */
4177 /* CS */
4178 Assert(pCtx->cs.Attr.n.u1Present);
4179 Assert(!(pCtx->cs.Attr.u & 0xf00));
4180 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4181 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4182 || !(pCtx->cs.Attr.n.u1Granularity));
4183 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4184 || (pCtx->cs.Attr.n.u1Granularity));
4185 /* CS cannot be loaded with NULL in protected mode. */
4186 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
4187 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4188 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4189 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4190 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4191 else
4192 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4193 /* SS */
4194 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4195 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4196 if ( !(pCtx->cr0 & X86_CR0_PE)
4197 || pCtx->cs.Attr.n.u4Type == 3)
4198 {
4199 Assert(!pCtx->ss.Attr.n.u2Dpl);
4200 }
4201 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4202 {
4203 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4204 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4205 Assert(pCtx->ss.Attr.n.u1Present);
4206 Assert(!(pCtx->ss.Attr.u & 0xf00));
4207 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4208 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4209 || !(pCtx->ss.Attr.n.u1Granularity));
4210 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4211 || (pCtx->ss.Attr.n.u1Granularity));
4212 }
4213 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
4214 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4215 {
4216 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4217 Assert(pCtx->ds.Attr.n.u1Present);
4218 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4219 Assert(!(pCtx->ds.Attr.u & 0xf00));
4220 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4221 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4222 || !(pCtx->ds.Attr.n.u1Granularity));
4223 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4224 || (pCtx->ds.Attr.n.u1Granularity));
4225 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4226 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4227 }
4228 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4229 {
4230 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4231 Assert(pCtx->es.Attr.n.u1Present);
4232 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4233 Assert(!(pCtx->es.Attr.u & 0xf00));
4234 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4235 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4236 || !(pCtx->es.Attr.n.u1Granularity));
4237 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4238 || (pCtx->es.Attr.n.u1Granularity));
4239 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4240 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4241 }
4242 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4243 {
4244 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4245 Assert(pCtx->fs.Attr.n.u1Present);
4246 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4247 Assert(!(pCtx->fs.Attr.u & 0xf00));
4248 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4249 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4250 || !(pCtx->fs.Attr.n.u1Granularity));
4251 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4252 || (pCtx->fs.Attr.n.u1Granularity));
4253 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4254 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4255 }
4256 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4257 {
4258 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4259 Assert(pCtx->gs.Attr.n.u1Present);
4260 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4261 Assert(!(pCtx->gs.Attr.u & 0xf00));
4262 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4263 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4264 || !(pCtx->gs.Attr.n.u1Granularity));
4265 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4266 || (pCtx->gs.Attr.n.u1Granularity));
4267 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4268 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4269 }
4270 /* 64-bit capable CPUs. */
4271# if HC_ARCH_BITS == 64
4272 Assert(!(pCtx->cs.u64Base >> 32));
4273 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
4274 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
4275 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
4276# endif
4277 }
4278 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4279 || ( CPUMIsGuestInRealModeEx(pCtx)
4280 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4281 {
4282 /* Real and v86 mode checks. */
4283 /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS. We want to check what we're feeding to VT-x. */
4284 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4285 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4286 {
4287 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4288 }
4289 else
4290 {
4291 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4292 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4293 }
4294
4295 /* CS */
4296 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4297 Assert(pCtx->cs.u32Limit == 0xffff);
4298 Assert(u32CSAttr == 0xf3);
4299 /* SS */
4300 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4301 Assert(pCtx->ss.u32Limit == 0xffff);
4302 Assert(u32SSAttr == 0xf3);
4303 /* DS */
4304 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4305 Assert(pCtx->ds.u32Limit == 0xffff);
4306 Assert(u32DSAttr == 0xf3);
4307 /* ES */
4308 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4309 Assert(pCtx->es.u32Limit == 0xffff);
4310 Assert(u32ESAttr == 0xf3);
4311 /* FS */
4312 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4313 Assert(pCtx->fs.u32Limit == 0xffff);
4314 Assert(u32FSAttr == 0xf3);
4315 /* GS */
4316 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4317 Assert(pCtx->gs.u32Limit == 0xffff);
4318 Assert(u32GSAttr == 0xf3);
4319 /* 64-bit capable CPUs. */
4320# if HC_ARCH_BITS == 64
4321 Assert(!(pCtx->cs.u64Base >> 32));
4322 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
4323 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
4324 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
4325# endif
4326 }
4327}
4328#endif /* VBOX_STRICT */
4329
4330
4331/**
4332 * Writes a guest segment register into the guest-state area in the VMCS.
4333 *
4334 * @returns VBox status code.
4335 * @param pVCpu The cross context virtual CPU structure.
4336 * @param idxSel Index of the selector in the VMCS.
4337 * @param idxLimit Index of the segment limit in the VMCS.
4338 * @param idxBase Index of the segment base in the VMCS.
4339 * @param idxAccess Index of the access rights of the segment in the VMCS.
4340 * @param pSelReg Pointer to the segment selector.
4341 *
4342 * @remarks No-long-jump zone!!!
4343 */
4344static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
4345 uint32_t idxAccess, PCPUMSELREG pSelReg)
4346{
4347 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4348 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4349 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4350 AssertRCReturn(rc, rc);
4351
4352 uint32_t u32Access = pSelReg->Attr.u;
4353 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4354 {
4355 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4356 u32Access = 0xf3;
4357 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4358 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4359 }
4360 else
4361 {
4362 /*
4363 * The way to differentiate whether this is really a null selector or just a selector that was loaded with 0 in
4364 * real-mode is by using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable
4365 * in protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that null
4366 * selectors loaded in protected-mode have their attributes set to 0.
4367 */
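 /* For example: a protected-mode "mov ds, 0" leaves the attributes at 0 and the selector is marked
    unusable below, whereas DS=0 inherited from real mode still carries non-zero attributes and is
    therefore left usable. */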
4368 if (!u32Access)
4369 u32Access = X86DESCATTR_UNUSABLE;
4370 }
4371
4372 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4373 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4374 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
4375
4376 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4377 AssertRCReturn(rc, rc);
4378 return rc;
4379}
4380
4381
4382/**
4383 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4384 * into the guest-state area in the VMCS.
4385 *
4386 * @returns VBox status code.
4387 * @param pVCpu The cross context virtual CPU structure.
4388 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4389 * out-of-sync. Make sure to update the required fields
4390 * before using them.
4391 *
4392 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation).
4393 * @remarks No-long-jump zone!!!
4394 */
4395static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4396{
4397 int rc = VERR_INTERNAL_ERROR_5;
4398 PVM pVM = pVCpu->CTX_SUFF(pVM);
4399
4400 /*
4401 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4402 */
4403 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))
4404 {
4405 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
4406 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4407 {
4408 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4409 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4410 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4411 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4412 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4413 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4414 }
4415
4416#ifdef VBOX_WITH_REM
4417 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4418 {
4419 Assert(pVM->hm.s.vmx.pRealModeTSS);
4420 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4421 if ( pVCpu->hm.s.vmx.fWasInRealMode
4422 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4423 {
4424 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4425 in real-mode (e.g. OpenBSD 4.0) */
4426 REMFlushTBs(pVM);
4427 Log4(("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));
4428 pVCpu->hm.s.vmx.fWasInRealMode = false;
4429 }
4430 }
4431#endif
4432 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_CS_SEL, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
4433 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
4434 AssertRCReturn(rc, rc);
4435 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_SS_SEL, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
4436 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
4437 AssertRCReturn(rc, rc);
4438 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_DS_SEL, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
4439 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
4440 AssertRCReturn(rc, rc);
4441 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_ES_SEL, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
4442 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
4443 AssertRCReturn(rc, rc);
4444 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FS_SEL, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
4445 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
4446 AssertRCReturn(rc, rc);
4447 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_GS_SEL, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
4448 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
4449 AssertRCReturn(rc, rc);
4450
4451#ifdef VBOX_STRICT
4452 /* Validate. */
4453 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
4454#endif
4455
4456 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);
4457 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,
4458 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4459 }
4460
4461 /*
4462 * Guest TR.
4463 */
4464 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))
4465 {
4466 /*
4467 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
4468 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
4469 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4470 */
4471 uint16_t u16Sel = 0;
4472 uint32_t u32Limit = 0;
4473 uint64_t u64Base = 0;
4474 uint32_t u32AccessRights = 0;
4475
4476 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4477 {
4478 u16Sel = pMixedCtx->tr.Sel;
4479 u32Limit = pMixedCtx->tr.u32Limit;
4480 u64Base = pMixedCtx->tr.u64Base;
4481 u32AccessRights = pMixedCtx->tr.Attr.u;
4482 }
4483 else
4484 {
4485 Assert(pVM->hm.s.vmx.pRealModeTSS);
4486 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4487
4488 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4489 RTGCPHYS GCPhys;
4490 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4491 AssertRCReturn(rc, rc);
4492
4493 X86DESCATTR DescAttr;
4494 DescAttr.u = 0;
4495 DescAttr.n.u1Present = 1;
4496 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4497
4498 u16Sel = 0;
4499 u32Limit = HM_VTX_TSS_SIZE;
4500 u64Base = GCPhys; /* in real-mode phys = virt. */
4501 u32AccessRights = DescAttr.u;
4502 }
4503
4504 /* Validate. */
4505 Assert(!(u16Sel & RT_BIT(2)));
4506 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4507 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4508 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4509 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4510 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4511 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4512 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4513 Assert( (u32Limit & 0xfff) == 0xfff
4514 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4515 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4516 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4517
4518 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_TR_SEL, u16Sel);
4519 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
4520 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
4521 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
4522 AssertRCReturn(rc, rc);
4523
4524 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);
4525 Log4(("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu, u64Base));
4526 }
4527
4528 /*
4529 * Guest GDTR.
4530 */
4531 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))
4532 {
4533 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
4534 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
4535 AssertRCReturn(rc, rc);
4536
4537 /* Validate. */
4538 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4539
4540 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);
4541 Log4(("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));
4542 }
4543
4544 /*
4545 * Guest LDTR.
4546 */
4547 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))
4548 {
4549 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
4550 uint32_t u32Access = 0;
4551 if (!pMixedCtx->ldtr.Attr.u)
4552 u32Access = X86DESCATTR_UNUSABLE;
4553 else
4554 u32Access = pMixedCtx->ldtr.Attr.u;
4555
4556 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, pMixedCtx->ldtr.Sel);
4557 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
4558 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
4559 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
4560 AssertRCReturn(rc, rc);
4561
4562 /* Validate. */
4563 if (!(u32Access & X86DESCATTR_UNUSABLE))
4564 {
4565 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4566 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4567 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4568 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4569 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4570 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4571 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4572 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4573 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4574 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4575 }
4576
4577 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
4578 Log4(("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));
4579 }
4580
4581 /*
4582 * Guest IDTR.
4583 */
4584 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))
4585 {
4586 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
4587 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
4588 AssertRCReturn(rc, rc);
4589
4590 /* Validate. */
4591 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4592
4593 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR);
4594 Log4(("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));
4595 }
4596
4597 return VINF_SUCCESS;
4598}
4599
4600
4601/**
4602 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4603 * areas.
4604 *
4605 * These MSRs will automatically be loaded to the host CPU on every successful
4606 * VM-entry and stored from the host CPU on every successful VM-exit. This also
4607 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
4608 * -not- updated here for performance reasons. See hmR0VmxSaveHostMsrs().
4609 *
4610 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
4611 *
4612 * @returns VBox status code.
4613 * @param pVCpu The cross context virtual CPU structure.
4614 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4615 * out-of-sync. Make sure to update the required fields
4616 * before using them.
4617 *
4618 * @remarks No-long-jump zone!!!
4619 */
4620static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4621{
4622 AssertPtr(pVCpu);
4623 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4624
4625 /*
4626 * MSRs for which we use the auto-load/store MSR area in the VMCS.
4627 */
4628 PVM pVM = pVCpu->CTX_SUFF(pVM);
4629 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
4630 {
4631 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
4632#if HC_ARCH_BITS == 32
4633 if (pVM->hm.s.fAllow64BitGuests)
4634 {
4635 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL);
4636 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL);
4637 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false, NULL);
4638 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
4639 AssertRCReturn(rc, rc);
4640# ifdef LOG_ENABLED
4641 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4642 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4643 {
4644 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr,
4645 pMsr->u64Value));
4646 }
4647# endif
4648 }
4649#endif
4650 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4651 }
4652
4653 /*
4654 * Guest Sysenter MSRs.
4655 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4656 * VM-exits on WRMSRs for these MSRs.
4657 */
4658 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR))
4659 {
4660 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4661 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4662 }
4663
4664 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR))
4665 {
4666 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4667 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4668 }
4669
4670 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR))
4671 {
4672 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4673 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4674 }
4675
4676 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
4677 {
4678 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4679 {
4680 /*
4681 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4682 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4683 */
4684 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4685 {
4686 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4687 AssertRCReturn(rc,rc);
4688 Log4(("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));
4689 }
4690 else
4691 {
4692 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */,
4693 NULL /* pfAddedAndUpdated */);
4694 AssertRCReturn(rc, rc);
4695
4696 /* We need to intercept reads too, see @bugref{7386#c16}. */
4697 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
4698 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4699 Log4(("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,
4700 pMixedCtx->msrEFER, pVCpu->hm.s.vmx.cMsrs));
4701 }
4702 }
4703 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4704 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4705 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
4706 }
4707
4708 return VINF_SUCCESS;
4709}
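
/*
 * Informal summary (not normative): on a 32-bit host that allows 64-bit guests, the guest auto-load/store
 * area now contains one VMXAUTOMSR entry each for MSR_K8_LSTAR, MSR_K6_STAR, MSR_K8_SF_MASK and
 * MSR_K8_KERNEL_GS_BASE (plus MSR_K6_EFER when the CPU lacks the dedicated VMCS EFER controls). The CPU
 * loads these guest values on every VM-entry and stores them back on every VM-exit; the corresponding
 * host values live in a separate area and are refreshed in hmR0VmxSaveHostMsrs().
 */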
4710
4711
4712/**
4713 * Loads the guest activity state into the guest-state area in the VMCS.
4714 *
4715 * @returns VBox status code.
4716 * @param pVCpu The cross context virtual CPU structure.
4717 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4718 * out-of-sync. Make sure to update the required fields
4719 * before using them.
4720 *
4721 * @remarks No-long-jump zone!!!
4722 */
4723static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4724{
4725 NOREF(pMixedCtx);
4726 /** @todo See if we can make use of other states, e.g.
4727 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4728 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE))
4729 {
4730 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4731 AssertRCReturn(rc, rc);
4732
4733 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE);
4734 }
4735 return VINF_SUCCESS;
4736}
4737
4738
4739/**
4740 * Sets up the appropriate function to run guest code.
4741 *
4742 * @returns VBox status code.
4743 * @param pVCpu The cross context virtual CPU structure.
4744 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4745 * out-of-sync. Make sure to update the required fields
4746 * before using them.
4747 *
4748 * @remarks No-long-jump zone!!!
4749 */
4750static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4751{
4752 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4753 {
4754#ifndef VBOX_ENABLE_64_BITS_GUESTS
4755 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4756#endif
4757 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4758#if HC_ARCH_BITS == 32
4759 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4760 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4761 {
4762 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4763 {
4764 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4765 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4766 | HM_CHANGED_VMX_ENTRY_CTLS
4767 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4768 }
4769 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4770 }
4771#else
4772 /* 64-bit host. */
4773 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4774#endif
4775 }
4776 else
4777 {
4778 /* Guest is not in long mode, use the 32-bit handler. */
4779#if HC_ARCH_BITS == 32
4780 if ( pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32
4781 && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4782 {
4783 /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4784 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS
4785 | HM_CHANGED_VMX_ENTRY_CTLS
4786 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
4787 }
4788#endif
4789 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4790 }
4791 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4792 return VINF_SUCCESS;
4793}
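
/*
 * Informal summary of the handler selection above (assuming 64-bit guests are enabled in the build):
 *
 *      Guest mode          32-bit host                 64-bit host
 *      ----------          -----------                 -----------
 *      Long mode           VMXR0SwitcherStartVM64      VMXR0StartVM64
 *      Not long mode       VMXR0StartVM32              VMXR0StartVM32
 *
 * Only the 64-bit-guest-on-32-bit-host combination needs the 32->64 world switcher.
 */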
4794
4795
4796/**
4797 * Wrapper for running the guest code in VT-x.
4798 *
4799 * @returns VBox status code, no informational status codes.
4800 * @param pVM The cross context VM structure.
4801 * @param pVCpu The cross context virtual CPU structure.
4802 * @param pCtx Pointer to the guest-CPU context.
4803 *
4804 * @remarks No-long-jump zone!!!
4805 */
4806DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4807{
4808 /*
4809 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4810 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4811 * Refer to the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
4812 */
4813 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
4814 /** @todo Add stats for resume vs launch. */
4815#ifdef VBOX_WITH_KERNEL_USING_XMM
4816 int rc = HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4817#else
4818 int rc = pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4819#endif
4820 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
4821 return rc;
4822}
4823
4824
4825/**
4826 * Reports world-switch error and dumps some useful debug info.
4827 *
4828 * @param pVM The cross context VM structure.
4829 * @param pVCpu The cross context virtual CPU structure.
4830 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4831 * @param pCtx Pointer to the guest-CPU context.
4832 * @param pVmxTransient Pointer to the VMX transient structure (only
4833 * exitReason updated).
4834 */
4835static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4836{
4837 Assert(pVM);
4838 Assert(pVCpu);
4839 Assert(pCtx);
4840 Assert(pVmxTransient);
4841 HMVMX_ASSERT_PREEMPT_SAFE();
4842
4843 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4844 switch (rcVMRun)
4845 {
4846 case VERR_VMX_INVALID_VMXON_PTR:
4847 AssertFailed();
4848 break;
4849 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4850 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4851 {
4852 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4853 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4854 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4855 AssertRC(rc);
4856
4857 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4858 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4859 Cannot do it here as we may have been preempted for a while by now. */
4860
4861#ifdef VBOX_STRICT
4862 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4863 pVmxTransient->uExitReason));
4864 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4865 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4866 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4867 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4868 else
4869 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4870 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
4871 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
4872
4873 /* VMX control bits. */
4874 uint32_t u32Val;
4875 uint64_t u64Val;
4876 RTHCUINTREG uHCReg;
4877 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4878 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4879 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4880 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4881 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4882 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4883 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4884 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4885 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4886 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4887 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4888 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4889 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4890 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4891 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4892 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4893 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4894 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4895 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4896 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4897 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4898 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4899 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4900 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4901 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4902 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4903 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4904 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4905 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4906 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4907 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4908 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4909 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4910 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4911 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4912 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4913 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4914 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4915 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4916 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4917 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4918 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4919
4920 /* Guest bits. */
4921 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4922 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4923 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4924 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4925 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4926 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4927 rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val); AssertRC(rc);
4928 Log4(("VMX_VMCS16_VPID %u\n", u32Val));
4929
4930 /* Host bits. */
4931 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4932 Log4(("Host CR0 %#RHr\n", uHCReg));
4933 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4934 Log4(("Host CR3 %#RHr\n", uHCReg));
4935 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4936 Log4(("Host CR4 %#RHr\n", uHCReg));
4937
4938 RTGDTR HostGdtr;
4939 PCX86DESCHC pDesc;
4940 ASMGetGDTR(&HostGdtr);
4941 rc = VMXReadVmcs32(VMX_VMCS16_HOST_CS_SEL, &u32Val); AssertRC(rc);
4942 Log4(("Host CS %#08x\n", u32Val));
4943 if (u32Val < HostGdtr.cbGdt)
4944 {
4945 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4946 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
4947 }
4948
4949 rc = VMXReadVmcs32(VMX_VMCS16_HOST_DS_SEL, &u32Val); AssertRC(rc);
4950 Log4(("Host DS %#08x\n", u32Val));
4951 if (u32Val < HostGdtr.cbGdt)
4952 {
4953 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4954 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
4955 }
4956
4957 rc = VMXReadVmcs32(VMX_VMCS16_HOST_ES_SEL, &u32Val); AssertRC(rc);
4958 Log4(("Host ES %#08x\n", u32Val));
4959 if (u32Val < HostGdtr.cbGdt)
4960 {
4961 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4962 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
4963 }
4964
4965 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FS_SEL, &u32Val); AssertRC(rc);
4966 Log4(("Host FS %#08x\n", u32Val));
4967 if (u32Val < HostGdtr.cbGdt)
4968 {
4969 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4970 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
4971 }
4972
4973 rc = VMXReadVmcs32(VMX_VMCS16_HOST_GS_SEL, &u32Val); AssertRC(rc);
4974 Log4(("Host GS %#08x\n", u32Val));
4975 if (u32Val < HostGdtr.cbGdt)
4976 {
4977 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4978 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
4979 }
4980
4981 rc = VMXReadVmcs32(VMX_VMCS16_HOST_SS_SEL, &u32Val); AssertRC(rc);
4982 Log4(("Host SS %#08x\n", u32Val));
4983 if (u32Val < HostGdtr.cbGdt)
4984 {
4985 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4986 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
4987 }
4988
4989 rc = VMXReadVmcs32(VMX_VMCS16_HOST_TR_SEL, &u32Val); AssertRC(rc);
4990 Log4(("Host TR %#08x\n", u32Val));
4991 if (u32Val < HostGdtr.cbGdt)
4992 {
4993 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4994 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
4995 }
4996
4997 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4998 Log4(("Host TR Base %#RHv\n", uHCReg));
4999 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5000 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5001 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5002 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5003 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5004 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5005 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5006 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5007 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5008 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5009 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5010 Log4(("Host RSP %#RHv\n", uHCReg));
5011 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5012 Log4(("Host RIP %#RHv\n", uHCReg));
5013# if HC_ARCH_BITS == 64
5014 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5015 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5016 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5017 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5018 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5019 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5020# endif
5021#endif /* VBOX_STRICT */
5022 break;
5023 }
5024
5025 default:
5026 /* Impossible */
5027 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5028 break;
5029 }
5030 NOREF(pVM); NOREF(pCtx);
5031}
5032
5033
5034#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
5035#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5036# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5037#endif
5038#ifdef VBOX_STRICT
5039static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5040{
5041 switch (idxField)
5042 {
5043 case VMX_VMCS_GUEST_RIP:
5044 case VMX_VMCS_GUEST_RSP:
5045 case VMX_VMCS_GUEST_SYSENTER_EIP:
5046 case VMX_VMCS_GUEST_SYSENTER_ESP:
5047 case VMX_VMCS_GUEST_GDTR_BASE:
5048 case VMX_VMCS_GUEST_IDTR_BASE:
5049 case VMX_VMCS_GUEST_CS_BASE:
5050 case VMX_VMCS_GUEST_DS_BASE:
5051 case VMX_VMCS_GUEST_ES_BASE:
5052 case VMX_VMCS_GUEST_FS_BASE:
5053 case VMX_VMCS_GUEST_GS_BASE:
5054 case VMX_VMCS_GUEST_SS_BASE:
5055 case VMX_VMCS_GUEST_LDTR_BASE:
5056 case VMX_VMCS_GUEST_TR_BASE:
5057 case VMX_VMCS_GUEST_CR3:
5058 return true;
5059 }
5060 return false;
5061}
5062
5063static bool hmR0VmxIsValidReadField(uint32_t idxField)
5064{
5065 switch (idxField)
5066 {
5067 /* Read-only fields. */
5068 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5069 return true;
5070 }
5071 /* Remaining readable fields should also be writable. */
5072 return hmR0VmxIsValidWriteField(idxField);
5073}
5074#endif /* VBOX_STRICT */
5075
5076
5077/**
5078 * Executes the specified handler in 64-bit mode.
5079 *
5080 * @returns VBox status code (no informational status codes).
5081 * @param pVM The cross context VM structure.
5082 * @param pVCpu The cross context virtual CPU structure.
5083 * @param pCtx Pointer to the guest CPU context.
5084 * @param enmOp The operation to perform.
5085 * @param cParams Number of parameters.
5086 * @param paParam Array of 32-bit parameters.
5087 */
5088VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,
5089 uint32_t cParams, uint32_t *paParam)
5090{
5091 NOREF(pCtx);
5092
5093 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5094 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5095 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5096 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5097
5098#ifdef VBOX_STRICT
5099 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5100 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5101
5102 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5103 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5104#endif
5105
5106 /* Disable interrupts. */
5107 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
5108
5109#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5110 RTCPUID idHostCpu = RTMpCpuId();
5111 CPUMR0SetLApic(pVCpu, idHostCpu);
5112#endif
5113
5114 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
5115 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5116
5117 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5118 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5119
5120 /* Leave VMX Root Mode. */
5121 VMXDisable();
5122
5123 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5124
5125 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5126 CPUMSetHyperEIP(pVCpu, enmOp);
5127 for (int i = (int)cParams - 1; i >= 0; i--)
5128 CPUMPushHyper(pVCpu, paParam[i]);
5129
5130 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5131
5132 /* Call the switcher. */
5133 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5134 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5135
5136 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5137 /* Make sure the VMX instructions don't cause #UD faults. */
5138 SUPR0ChangeCR4(X86_CR4_VMXE, ~0);
5139
5140 /* Re-enter VMX Root Mode */
5141 int rc2 = VMXEnable(HCPhysCpuPage);
5142 if (RT_FAILURE(rc2))
5143 {
5144 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5145 ASMSetFlags(fOldEFlags);
5146 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
5147 return rc2;
5148 }
5149
5150 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5151 AssertRC(rc2);
5152 Assert(!(ASMGetFlags() & X86_EFL_IF));
5153 ASMSetFlags(fOldEFlags);
5154 return rc;
5155}
5156
5157
5158/**
5159 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5160 * supporting 64-bit guests.
5161 *
5162 * @returns VBox status code.
5163 * @param fResume Whether to VMLAUNCH or VMRESUME.
5164 * @param pCtx Pointer to the guest-CPU context.
5165 * @param pCache Pointer to the VMCS cache.
5166 * @param pVM The cross context VM structure.
5167 * @param pVCpu The cross context virtual CPU structure.
5168 */
5169DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5170{
5171 NOREF(fResume);
5172
5173 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
5174 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5175
5176#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5177 pCache->uPos = 1;
5178 pCache->interPD = PGMGetInterPaeCR3(pVM);
5179 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5180#endif
5181
5182#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5183 pCache->TestIn.HCPhysCpuPage = 0;
5184 pCache->TestIn.HCPhysVmcs = 0;
5185 pCache->TestIn.pCache = 0;
5186 pCache->TestOut.HCPhysVmcs = 0;
5187 pCache->TestOut.pCache = 0;
5188 pCache->TestOut.pCtx = 0;
5189 pCache->TestOut.eflags = 0;
5190#else
5191 NOREF(pCache);
5192#endif
5193
5194 uint32_t aParam[10];
5195 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5196 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5197 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5198 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
5199 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5200 aParam[5] = 0;
5201 aParam[6] = VM_RC_ADDR(pVM, pVM);
5202 aParam[7] = 0;
5203 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5204 aParam[9] = 0;
5205
5206#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5207 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5208 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5209#endif
5210 int rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5211
5212#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5213 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5214 Assert(pCtx->dr[4] == 10);
5215 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5216#endif
5217
5218#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5219 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5220 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5221 pVCpu->hm.s.vmx.HCPhysVmcs));
5222 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5223 pCache->TestOut.HCPhysVmcs));
5224 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5225 pCache->TestOut.pCache));
5226 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5227 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5228 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5229 pCache->TestOut.pCtx));
5230 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5231#endif
5232 return rc;
5233}
5234
5235
5236/**
5237 * Initializes the VMCS read cache.
5238 *
5239 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
5240 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
5241 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
5242 * (those that have a 32-bit FULL & HIGH part).
5243 *
5244 * @returns VBox status code.
5245 * @param pVM The cross context VM structure.
5246 * @param pVCpu The cross context virtual CPU structure.
5247 */
5248static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
5249{
5250#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5251{ \
5252 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5253 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5254 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5255 ++cReadFields; \
5256}
5257
5258 AssertPtr(pVM);
5259 AssertPtr(pVCpu);
5260 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5261 uint32_t cReadFields = 0;
5262
5263 /*
5264 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5265 * and serve to indicate exceptions to the rules.
5266 */
5267
5268 /* Guest-natural selector base fields. */
5269#if 0
5270 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5271 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5272 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5273#endif
5274 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5275 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5276 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5277 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5278 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5279 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5280 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5281 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5282 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5283 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5284 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5285 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5286#if 0
5287 /* Unused natural width guest-state fields. */
5288 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5289 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5290#endif
5291 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5292 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5293
5294 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
5295#if 0
5296 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5297 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5298 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5299 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5300 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5301 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5302 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5303 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5304 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5305#endif
5306
5307 /* Natural width guest-state fields. */
5308 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5309#if 0
5310 /* Currently unused field. */
5311 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5312#endif
5313
5314 if (pVM->hm.s.fNestedPaging)
5315 {
5316 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5317 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5318 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5319 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5320 }
5321 else
5322 {
5323 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5324 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5325 }
5326
5327#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5328 return VINF_SUCCESS;
5329}
5330
5331
5332/**
5333 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5334 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5335 * Darwin, running 64-bit guests).
5336 *
5337 * @returns VBox status code.
5338 * @param pVCpu The cross context virtual CPU structure.
5339 * @param idxField The VMCS field encoding.
5340 * @param u64Val 16, 32 or 64-bit value.
5341 */
5342VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5343{
5344 int rc;
5345 switch (idxField)
5346 {
5347 /*
5348 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5349 */
5350 /* 64-bit Control fields. */
5351 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5352 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5353 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5354 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5355 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5356 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5357 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5358 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5359 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5360 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5361 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5362 case VMX_VMCS64_CTRL_EPTP_FULL:
5363 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5364 /* 64-bit Guest-state fields. */
5365 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5366 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5367 case VMX_VMCS64_GUEST_PAT_FULL:
5368 case VMX_VMCS64_GUEST_EFER_FULL:
5369 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5370 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5371 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5372 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5373 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5374 /* 64-bit Host-state fields. */
5375 case VMX_VMCS64_HOST_PAT_FULL:
5376 case VMX_VMCS64_HOST_EFER_FULL:
5377 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5378 {
5379 rc = VMXWriteVmcs32(idxField, u64Val);
5380 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
5381 break;
5382 }
5383
5384 /*
5385 * These fields do not have high and low parts. We queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5386 * values). When we switch the host to 64-bit mode to run 64-bit guests, these queued VMWRITEs are executed at that point.
5387 */
5388 /* Natural-width Guest-state fields. */
5389 case VMX_VMCS_GUEST_CR3:
5390 case VMX_VMCS_GUEST_ES_BASE:
5391 case VMX_VMCS_GUEST_CS_BASE:
5392 case VMX_VMCS_GUEST_SS_BASE:
5393 case VMX_VMCS_GUEST_DS_BASE:
5394 case VMX_VMCS_GUEST_FS_BASE:
5395 case VMX_VMCS_GUEST_GS_BASE:
5396 case VMX_VMCS_GUEST_LDTR_BASE:
5397 case VMX_VMCS_GUEST_TR_BASE:
5398 case VMX_VMCS_GUEST_GDTR_BASE:
5399 case VMX_VMCS_GUEST_IDTR_BASE:
5400 case VMX_VMCS_GUEST_RSP:
5401 case VMX_VMCS_GUEST_RIP:
5402 case VMX_VMCS_GUEST_SYSENTER_ESP:
5403 case VMX_VMCS_GUEST_SYSENTER_EIP:
5404 {
5405 if (!(u64Val >> 32))
5406 {
5407 /* If this field is 64-bit, VT-x will zero out the top bits. */
5408 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5409 }
5410 else
5411 {
5412 /* Assert that only the 32->64 switcher case should ever come here. */
5413 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5414 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5415 }
5416 break;
5417 }
5418
5419 default:
5420 {
5421 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5422 rc = VERR_INVALID_PARAMETER;
5423 break;
5424 }
5425 }
5426 AssertRCReturn(rc, rc);
5427 return rc;
5428}
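
#if 0
/* Illustration only (hypothetical value, not called anywhere): for a FULL/HIGH field such as the TSC
   offset, VMXWriteVmcs64Ex() splits the 64-bit value into two 32-bit VMWRITEs, equivalent to: */
static void vmxIllustrateFullHighSplit(void)
{
    uint64_t const u64Val = UINT64_C(0x0000000100000500);
    int rc  = VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL,     (uint32_t)u64Val);          /* FULL part: 0x00000500 */
    rc     |= VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL + 1, (uint32_t)(u64Val >> 32));  /* HIGH part: 0x00000001 */
    AssertRC(rc);
}
#endif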
5429
5430
5431/**
5432 * Queues up a VMWRITE by using the VMCS write cache.
5433 * This is only used on 32-bit hosts (except Darwin) for 64-bit guests.
5434 *
5435 * @param pVCpu The cross context virtual CPU structure.
5436 * @param idxField The VMCS field encoding.
5437 * @param u64Val 16, 32 or 64-bit value.
5438 */
5439VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5440{
5441 AssertPtr(pVCpu);
5442 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5443
5444 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5445 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5446
5447 /* Make sure there are no duplicates. */
5448 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5449 {
5450 if (pCache->Write.aField[i] == idxField)
5451 {
5452 pCache->Write.aFieldVal[i] = u64Val;
5453 return VINF_SUCCESS;
5454 }
5455 }
5456
5457 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5458 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5459 pCache->Write.cValidEntries++;
5460 return VINF_SUCCESS;
5461}
5462#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
5463
5464
5465/**
5466 * Sets up the usage of TSC-offsetting and updates the VMCS.
5467 *
5468 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
5469 * VMX preemption timer.
5470 *
5471 * @returns VBox status code.
5472 * @param pVM The cross context VM structure.
5473 * @param pVCpu The cross context virtual CPU structure.
5474 *
5475 * @remarks No-long-jump zone!!!
5476 */
5477static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
5478{
5479 int rc;
5480 bool fOffsettedTsc;
5481 bool fParavirtTsc;
5482 if (pVM->hm.s.vmx.fUsePreemptTimer)
5483 {
5484 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
5485 &fOffsettedTsc, &fParavirtTsc);
5486
5487 /* Make sure the returned values have sane upper and lower boundaries. */
5488 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5489 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5490 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
5491 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5492
5493 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5494 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
5495 }
5496 else
5497 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
5498
5499 /** @todo later optimize this to be done elsewhere and not before every
5500 * VM-entry. */
5501 if (fParavirtTsc)
5502 {
5503 /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
5504 information before every VM-entry, so this is disabled for performance reasons. */
5505#if 0
5506 rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5507 AssertRC(rc);
5508#endif
5509 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5510 }
5511
5512 if (fOffsettedTsc && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
5513 {
5514 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
5515 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
5516
5517 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5518 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5519 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5520 }
5521 else
5522 {
5523 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5524 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5525 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
5526 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5527 }
5528}
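
/*
 * Worked example for the preemption-timer clamping above (hypothetical CPU frequency, for illustration
 * only): on a 2.048 GHz CPU, u64CpuHz / 64 = 32,000,000 and u64CpuHz / 2048 = 1,000,000, so
 * cTicksToDeadline is clamped to the range [1e6, 32e6] TSC ticks (roughly 0.49 ms to 15.6 ms) before
 * being shifted right by cPreemptTimerShift, since the VMX preemption timer counts down at the TSC rate
 * divided by 2^cPreemptTimerShift.
 */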
5529
5530
5531/**
5532 * Determines if an exception is a contributory exception.
5533 *
5534 * Contributory exceptions are ones which can cause double-faults unless the
5535 * original exception was a benign exception. Page-fault is intentionally not
5536 * included here as it's a conditional contributory exception.
5537 *
5538 * @returns true if the exception is contributory, false otherwise.
5539 * @param uVector The exception vector.
5540 */
5541DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
5542{
5543 switch (uVector)
5544 {
5545 case X86_XCPT_GP:
5546 case X86_XCPT_SS:
5547 case X86_XCPT_NP:
5548 case X86_XCPT_TS:
5549 case X86_XCPT_DE:
5550 return true;
5551 default:
5552 break;
5553 }
5554 return false;
5555}
5556
5557
5558/**
5559 * Sets an event as pending for injection into the guest.
5560 *
5561 * @param pVCpu The cross context virtual CPU structure.
5562 * @param u32IntInfo The VM-entry interruption-information field.
5563 * @param cbInstr The VM-entry instruction length in bytes (for software
5564 * interrupts, exceptions and privileged software
5565 * exceptions).
5566 * @param u32ErrCode The VM-entry exception error code.
5567 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5568 * page-fault.
5569 *
5570 * @remarks Statistics counter assumes this is a guest event being injected or
5571 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5572 * always incremented.
5573 */
5574DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5575 RTGCUINTPTR GCPtrFaultAddress)
5576{
5577 Assert(!pVCpu->hm.s.Event.fPending);
5578 pVCpu->hm.s.Event.fPending = true;
5579 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5580 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5581 pVCpu->hm.s.Event.cbInstr = cbInstr;
5582 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5583
5584 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5585}
5586
5587
5588/**
5589 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
5590 *
5591 * @param pVCpu The cross context virtual CPU structure.
5592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5593 * out-of-sync. Make sure to update the required fields
5594 * before using them.
5595 */
5596DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5597{
5598 NOREF(pMixedCtx);
5599 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5600 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5601 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5602 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5603}
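
#if 0
/* Sketch (not used anywhere, merely illustrating hmR0VmxSetPendingEvent()): queueing a #GP(0) for the
   guest would follow the same pattern as the #DF helper above, with the error-code-valid bit set since
   #GP pushes an error code. */
static void vmxIllustratePendingXcptGP(PVMCPU pVCpu)
{
    uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
    u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
}
#endif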
5604
5605
5606/**
5607 * Handles a condition that occurred while delivering an event through the guest
5608 * IDT.
5609 *
5610 * @returns Strict VBox status code (i.e. informational status codes too).
5611 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5612 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5613 * to continue execution of the guest which will deliver the \#DF.
5614 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5615 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5616 *
5617 * @param pVCpu The cross context virtual CPU structure.
5618 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5619 * out-of-sync. Make sure to update the required fields
5620 * before using them.
5621 * @param pVmxTransient Pointer to the VMX transient structure.
5622 *
5623 * @remarks No-long-jump zone!!!
5624 */
5625static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5626{
5627 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5628
5629 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); AssertRCReturn(rc2, rc2);
5630 rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); AssertRCReturn(rc2, rc2);
5631
5632 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5633 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5634 {
5635 uint32_t uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5636 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5637
5638 typedef enum
5639 {
5640 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
5641 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
5642 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
5643 VMXREFLECTXCPT_HANG, /* Indicate bad VM trying to deadlock the CPU. */
5644 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
5645 } VMXREFLECTXCPT;
5646
5647 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
5648 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
5649 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5650 {
5651 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
5652 {
5653 enmReflect = VMXREFLECTXCPT_XCPT;
5654#ifdef VBOX_STRICT
5655 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
5656 && uExitVector == X86_XCPT_PF)
5657 {
5658 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5659 }
5660#endif
5661 if ( uExitVector == X86_XCPT_PF
5662 && uIdtVector == X86_XCPT_PF)
5663 {
5664 pVmxTransient->fVectoringDoublePF = true;
5665 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5666 }
5667 else if ( uExitVector == X86_XCPT_AC
5668 && uIdtVector == X86_XCPT_AC)
5669 {
5670 enmReflect = VMXREFLECTXCPT_HANG;
5671 Log4(("IDT: Nested #AC - Bad guest\n"));
5672 }
5673 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
5674 && hmR0VmxIsContributoryXcpt(uExitVector)
5675 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
5676 || uIdtVector == X86_XCPT_PF))
5677 {
5678 enmReflect = VMXREFLECTXCPT_DF;
5679 }
5680 else if (uIdtVector == X86_XCPT_DF)
5681 enmReflect = VMXREFLECTXCPT_TF;
5682 }
5683 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5684 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5685 {
5686 /*
5687 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and
5688 * privileged software exceptions (#DB from ICEBP) as they reoccur when restarting the instruction.
5689 */
5690 enmReflect = VMXREFLECTXCPT_XCPT;
5691
5692 if (uExitVector == X86_XCPT_PF)
5693 {
5694 pVmxTransient->fVectoringPF = true;
5695 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
5696 }
5697 }
5698 }
5699 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5700 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5701 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5702 {
5703 /*
5704 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5705 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,
5706 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.
5707 */
5708 enmReflect = VMXREFLECTXCPT_XCPT;
5709 }
5710
5711 /*
5712 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred
5713 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before
5714 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.
5715 *
5716 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5717 */
5718 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5719 && enmReflect == VMXREFLECTXCPT_XCPT
5720 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
5721 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5722 {
5723 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5724 }
5725
5726 switch (enmReflect)
5727 {
5728 case VMXREFLECTXCPT_XCPT:
5729 {
5730 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5731 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5732 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5733
5734 uint32_t u32ErrCode = 0;
5735 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5736 {
5737 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5738 AssertRCReturn(rc2, rc2);
5739 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5740 }
5741
5742 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5743 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5744 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5745 rcStrict = VINF_SUCCESS;
5746 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5747 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));
5748
5749 break;
5750 }
5751
5752 case VMXREFLECTXCPT_DF:
5753 {
5754 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5755 rcStrict = VINF_HM_DOUBLE_FAULT;
5756 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5757 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));
5758
5759 break;
5760 }
5761
5762 case VMXREFLECTXCPT_TF:
5763 {
5764 rcStrict = VINF_EM_RESET;
5765 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5766 uExitVector));
5767 break;
5768 }
5769
5770 case VMXREFLECTXCPT_HANG:
5771 {
5772 rcStrict = VERR_EM_GUEST_CPU_HANG;
5773 break;
5774 }
5775
5776 default:
5777 Assert(rcStrict == VINF_SUCCESS);
5778 break;
5779 }
5780 }
5781 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
5782 && VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
5783 && uExitVector != X86_XCPT_DF
5784 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5785 {
5786 /*
5787 * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
5788 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
5789 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
5790 */
5791 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
5792 {
5793 Log4(("hmR0VmxCheckExitDueToEventDelivery: vcpu[%RU32] Setting VMCPU_FF_BLOCK_NMIS. Valid=%RTbool uExitReason=%u\n",
5794 pVCpu->idCpu, VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
5795 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5796 }
5797 }
5798
5799 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
5800 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
5801 return rcStrict;
5802}
5803
5804
5805/**
5806 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5807 *
5808 * @returns VBox status code.
5809 * @param pVCpu The cross context virtual CPU structure.
5810 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5811 * out-of-sync. Make sure to update the required fields
5812 * before using them.
5813 *
5814 * @remarks No-long-jump zone!!!
5815 */
5816static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5817{
5818 NOREF(pMixedCtx);
5819
5820 /*
5821 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook,
5822 * see hmR0VmxLeave(). Safer to just make this code non-preemptible.
5823 */
5824 VMMRZCallRing3Disable(pVCpu);
5825 HM_DISABLE_PREEMPT();
5826
5827 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0))
5828 {
5829 uint32_t uVal = 0;
5830 uint32_t uShadow = 0;
5831 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5832 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5833 AssertRCReturn(rc, rc);
5834
5835 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
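 /*
  * Editorial sketch, not part of the original source: bits covered by u32CR0Mask are owned by the
  * host, so the guest-visible value for those comes from the read shadow while the rest is taken
  * from the VMCS directly. E.g. with u32CR0Mask = X86_CR0_PE (0x1), uShadow = 0x1 and a VMCS value
  * of 0x80000030 (PG|NE|ET):
  *     (0x1 & 0x1) | (0x80000030 & ~0x1) = 0x80000031
  */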
5836 CPUMSetGuestCR0(pVCpu, uVal);
5837 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0);
5838 }
5839
5840 HM_RESTORE_PREEMPT();
5841 VMMRZCallRing3Enable(pVCpu);
5842 return VINF_SUCCESS;
5843}
5844
5845
5846/**
5847 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5848 *
5849 * @returns VBox status code.
5850 * @param pVCpu The cross context virtual CPU structure.
5851 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5852 * out-of-sync. Make sure to update the required fields
5853 * before using them.
5854 *
5855 * @remarks No-long-jump zone!!!
5856 */
5857static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5858{
5859 NOREF(pMixedCtx);
5860
5861 int rc = VINF_SUCCESS;
5862 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4))
5863 {
5864 uint32_t uVal = 0;
5865 uint32_t uShadow = 0;
5866 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5867 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5868 AssertRCReturn(rc, rc);
5869
5870 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5871 CPUMSetGuestCR4(pVCpu, uVal);
5872 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4);
5873 }
5874 return rc;
5875}
5876
5877
5878/**
5879 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5880 *
5881 * @returns VBox status code.
5882 * @param pVCpu The cross context virtual CPU structure.
5883 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5884 * out-of-sync. Make sure to update the required fields
5885 * before using them.
5886 *
5887 * @remarks No-long-jump zone!!!
5888 */
5889static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5890{
5891 int rc = VINF_SUCCESS;
5892 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
5893 {
5894 uint64_t u64Val = 0;
5895 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
5896 AssertRCReturn(rc, rc);
5897
5898 pMixedCtx->rip = u64Val;
5899 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);
5900 }
5901 return rc;
5902}
5903
5904
5905/**
5906 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
5907 *
5908 * @returns VBox status code.
5909 * @param pVCpu The cross context virtual CPU structure.
5910 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5911 * out-of-sync. Make sure to update the required fields
5912 * before using them.
5913 *
5914 * @remarks No-long-jump zone!!!
5915 */
5916static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5917{
5918 int rc = VINF_SUCCESS;
5919 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP))
5920 {
5921 uint64_t u64Val = 0;
5922 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
5923 AssertRCReturn(rc, rc);
5924
5925 pMixedCtx->rsp = u64Val;
5926 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP);
5927 }
5928 return rc;
5929}
5930
5931
5932/**
5933 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
5934 *
5935 * @returns VBox status code.
5936 * @param pVCpu The cross context virtual CPU structure.
5937 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5938 * out-of-sync. Make sure to update the required fields
5939 * before using them.
5940 *
5941 * @remarks No-long-jump zone!!!
5942 */
5943static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5944{
5945 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS))
5946 {
5947 uint32_t uVal = 0;
5948 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
5949 AssertRCReturn(rc, rc);
5950
5951 pMixedCtx->eflags.u32 = uVal;
5952 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
5953 {
5954 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5955 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
5956
5957 pMixedCtx->eflags.Bits.u1VM = 0;
5958 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
5959 }
5960
5961 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS);
5962 }
5963 return VINF_SUCCESS;
5964}
5965
5966
5967/**
5968 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
5969 * guest-CPU context.
5970 */
5971DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5972{
5973 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5974 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
5975 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5976 return rc;
5977}
5978
5979
5980/**
5981 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
5982 * from the guest-state area in the VMCS.
5983 *
5984 * @param pVCpu The cross context virtual CPU structure.
5985 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5986 * out-of-sync. Make sure to update the required fields
5987 * before using them.
5988 *
5989 * @remarks No-long-jump zone!!!
5990 */
5991static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5992{
5993 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE))
5994 {
5995 uint32_t uIntrState = 0;
5996 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
5997 AssertRC(rc);
5998
5999 if (!uIntrState)
6000 {
6001 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6002 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6003
6004 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6005 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6006 }
6007 else
6008 {
6009 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
6010 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
6011 {
6012 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6013 AssertRC(rc);
6014 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
6015 AssertRC(rc);
6016
6017 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
6018 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6019 }
6020 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6021 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6022
6023 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6024 {
6025 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6026 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6027 }
6028 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6029 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6030 }
6031
6032 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE);
6033 }
6034}
6035
6036
6037/**
6038 * Saves the guest's activity state.
6039 *
6040 * @returns VBox status code.
6041 * @param pVCpu The cross context virtual CPU structure.
6042 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6043 * out-of-sync. Make sure to update the required fields
6044 * before using them.
6045 *
6046 * @remarks No-long-jump zone!!!
6047 */
6048static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6049{
6050 NOREF(pMixedCtx);
6051 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
6052 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE);
6053 return VINF_SUCCESS;
6054}
6055
6056
6057/**
6058 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
6059 * the current VMCS into the guest-CPU context.
6060 *
6061 * @returns VBox status code.
6062 * @param pVCpu The cross context virtual CPU structure.
6063 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6064 * out-of-sync. Make sure to update the required fields
6065 * before using them.
6066 *
6067 * @remarks No-long-jump zone!!!
6068 */
6069static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6070{
6071 int rc = VINF_SUCCESS;
6072 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
6073 {
6074 uint32_t u32Val = 0;
6075 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
6076 pMixedCtx->SysEnter.cs = u32Val;
6077 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR);
6078 }
6079
6080 uint64_t u64Val = 0;
6081 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
6082 {
6083 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
6084 pMixedCtx->SysEnter.eip = u64Val;
6085 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR);
6086 }
6087 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
6088 {
6089 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
6090 pMixedCtx->SysEnter.esp = u64Val;
6091 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR);
6092 }
6093 return rc;
6094}
6095
6096
6097/**
6098 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from
6099 * the CPU back into the guest-CPU context.
6100 *
6101 * @returns VBox status code.
6102 * @param pVCpu The cross context virtual CPU structure.
6103 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6104 * out-of-sync. Make sure to update the required fields
6105 * before using them.
6106 *
6107 * @remarks No-long-jump zone!!!
6108 */
6109static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6110{
6111#if HC_ARCH_BITS == 64
6112 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
6113 {
6114 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
6115 VMMRZCallRing3Disable(pVCpu);
6116 HM_DISABLE_PREEMPT();
6117
6118 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
6119 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
6120 {
6121 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
6122 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6123 }
6124
6125 HM_RESTORE_PREEMPT();
6126 VMMRZCallRing3Enable(pVCpu);
6127 }
6128 else
6129 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6130#else
6131 NOREF(pMixedCtx);
6132 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
6133#endif
6134
6135 return VINF_SUCCESS;
6136}
6137
6138
6139/**
6140 * Saves the auto load/store'd guest MSRs from the current VMCS into
6141 * the guest-CPU context.
6142 *
6143 * @returns VBox status code.
6144 * @param pVCpu The cross context virtual CPU structure.
6145 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6146 * out-of-sync. Make sure to update the required fields
6147 * before using them.
6148 *
6149 * @remarks No-long-jump zone!!!
6150 */
6151static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6152{
6153 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))
6154 return VINF_SUCCESS;
6155
6156 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6157 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
6158 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));
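 /*
  * Editorial note, not part of the original source: each entry in the auto-load/store area is a
  * 16-byte VMX MSR entry (the MSR index in the first 32 bits, 32 reserved bits, then the 64-bit
  * MSR value), which is what the VMXAUTOMSR fields u32Msr and u64Value map onto; pvGuestMsr is
  * simply an array of cMsrs such entries walked below.
  */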
6159 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6160 {
6161 switch (pMsr->u32Msr)
6162 {
6163 case MSR_K8_TSC_AUX: CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
6164 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
6165 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
6166 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
6167 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6168 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
6169 break;
6170
6171 default:
6172 {
6173 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
6174 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6175 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6176 }
6177 }
6178 }
6179
6180 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);
6181 return VINF_SUCCESS;
6182}
6183
6184
6185/**
6186 * Saves the guest control registers from the current VMCS into the guest-CPU
6187 * context.
6188 *
6189 * @returns VBox status code.
6190 * @param pVCpu The cross context virtual CPU structure.
6191 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6192 * out-of-sync. Make sure to update the required fields
6193 * before using them.
6194 *
6195 * @remarks No-long-jump zone!!!
6196 */
6197static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6198{
6199 /* Guest CR0. Guest FPU. */
6200 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6201 AssertRCReturn(rc, rc);
6202
6203 /* Guest CR4. */
6204 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
6205 AssertRCReturn(rc, rc);
6206
6207 /* Guest CR2 - always updated during the world-switch or in #PF. */
6208 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
6209 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))
6210 {
6211 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
6212 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));
6213
6214 PVM pVM = pVCpu->CTX_SUFF(pVM);
6215 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6216 || ( pVM->hm.s.fNestedPaging
6217 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
6218 {
6219 uint64_t u64Val = 0;
6220 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6221 if (pMixedCtx->cr3 != u64Val)
6222 {
6223 CPUMSetGuestCR3(pVCpu, u64Val);
6224 if (VMMRZCallRing3IsEnabled(pVCpu))
6225 {
6226 PGMUpdateCR3(pVCpu, u64Val);
6227 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6228 }
6229 else
6230 {
6231 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3(). */
6232 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6233 }
6234 }
6235
6236 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
6237 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
6238 {
6239 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
6240 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
6241 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
6242 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
6243 AssertRCReturn(rc, rc);
6244
6245 if (VMMRZCallRing3IsEnabled(pVCpu))
6246 {
6247 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6248 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6249 }
6250 else
6251 {
6252 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
6253 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6254 }
6255 }
6256 }
6257
6258 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);
6259 }
6260
6261 /*
6262 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6263 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6264 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
6265 *
6266 * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
6267 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6268 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6269 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!
6270 *
6271 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6272 */
6273 if (VMMRZCallRing3IsEnabled(pVCpu))
6274 {
6275 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6276 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6277
6278 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6279 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6280
6281 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6282 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6283 }
6284
6285 return rc;
6286}
6287
6288
6289/**
6290 * Reads a guest segment register from the current VMCS into the guest-CPU
6291 * context.
6292 *
6293 * @returns VBox status code.
6294 * @param pVCpu The cross context virtual CPU structure.
6295 * @param idxSel Index of the selector in the VMCS.
6296 * @param idxLimit Index of the segment limit in the VMCS.
6297 * @param idxBase Index of the segment base in the VMCS.
6298 * @param idxAccess Index of the access rights of the segment in the VMCS.
6299 * @param pSelReg Pointer to the segment selector.
6300 *
6301 * @remarks No-long-jump zone!!!
6302 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
6303 * macro as that takes care of whether to read from the VMCS cache or
6304 * not.
6305 */
6306DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6307 PCPUMSELREG pSelReg)
6308{
6309 NOREF(pVCpu);
6310
6311 uint32_t u32Val = 0;
6312 int rc = VMXReadVmcs32(idxSel, &u32Val);
6313 AssertRCReturn(rc, rc);
6314 pSelReg->Sel = (uint16_t)u32Val;
6315 pSelReg->ValidSel = (uint16_t)u32Val;
6316 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6317
6318 rc = VMXReadVmcs32(idxLimit, &u32Val);
6319 AssertRCReturn(rc, rc);
6320 pSelReg->u32Limit = u32Val;
6321
6322 uint64_t u64Val = 0;
6323 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
6324 AssertRCReturn(rc, rc);
6325 pSelReg->u64Base = u64Val;
6326
6327 rc = VMXReadVmcs32(idxAccess, &u32Val);
6328 AssertRCReturn(rc, rc);
6329 pSelReg->Attr.u = u32Val;
6330
6331 /*
6332 * If VT-x marks the segment as unusable, most other bits remain undefined:
6333 * - For CS the L, D and G bits have meaning.
6334 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6335 * - For the remaining data segments no bits are defined.
6336 *
6337 * The present bit and the unusable bit have been observed to be set at the
6338 * same time (the selector was supposed to be invalid as we started executing
6339 * a V8086 interrupt in ring-0).
6340 *
6341 * What is important for the rest of the VBox code is that the P bit is
6342 * cleared. Some of the other VBox code recognizes the unusable bit, but
6343 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6344 * safe side here, we'll strip off P and other bits we don't care about. If
6345 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6346 *
6347 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6348 */
6349 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6350 {
6351 Assert(idxSel != VMX_VMCS16_GUEST_TR_SEL); /* TR is the only selector that can never be unusable. */
6352
6353 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6354 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6355 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6356
6357 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
6358#ifdef DEBUG_bird
6359 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
6360 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6361 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6362#endif
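 /*
  * Editorial sketch, not part of the original source, assuming the usual X86DESCATTR_* bit layout
  * (type 0-3, S 4, DPL 5-6, P 7, AVL 12, L 13, D 14, G 15, unusable 16): a present but unusable
  * read/write data segment read back as 0x10093 is reduced by the mask above to
  *     0x10093 & (UNUSABLE | L | D | G | DPL | TYPE | DT) = 0x10013
  * i.e. the P bit ends up clear, as promised.
  */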
6363 }
6364 return VINF_SUCCESS;
6365}
6366
6367
6368#ifdef VMX_USE_CACHED_VMCS_ACCESSES
6369# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6370 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6371 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6372#else
6373# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
6374 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
6375 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
6376#endif
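/* Editorial note, not part of the original source: in the non-cached configuration,
   VMXLOCAL_READ_SEG(CS, cs) expands to
       hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_CS_SEL, VMX_VMCS32_GUEST_CS_LIMIT,
                             VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
   the cached variant differs only in using VMX_VMCS_GUEST_CS_BASE_CACHE_IDX for the base field. */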
6377
6378
6379/**
6380 * Saves the guest segment registers from the current VMCS into the guest-CPU
6381 * context.
6382 *
6383 * @returns VBox status code.
6384 * @param pVCpu The cross context virtual CPU structure.
6385 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6386 * out-of-sync. Make sure to update the required fields
6387 * before using them.
6388 *
6389 * @remarks No-long-jump zone!!!
6390 */
6391static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6392{
6393 /* Guest segment registers. */
6394 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS))
6395 {
6396 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6397 AssertRCReturn(rc, rc);
6398
6399 rc = VMXLOCAL_READ_SEG(CS, cs);
6400 rc |= VMXLOCAL_READ_SEG(SS, ss);
6401 rc |= VMXLOCAL_READ_SEG(DS, ds);
6402 rc |= VMXLOCAL_READ_SEG(ES, es);
6403 rc |= VMXLOCAL_READ_SEG(FS, fs);
6404 rc |= VMXLOCAL_READ_SEG(GS, gs);
6405 AssertRCReturn(rc, rc);
6406
6407 /* Restore segment attributes for real-on-v86 mode hack. */
6408 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6409 {
6410 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6411 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6412 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6413 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6414 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6415 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6416 }
6417 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS);
6418 }
6419
6420 return VINF_SUCCESS;
6421}
6422
6423
6424/**
6425 * Saves the guest descriptor table registers and task register from the current
6426 * VMCS into the guest-CPU context.
6427 *
6428 * @returns VBox status code.
6429 * @param pVCpu The cross context virtual CPU structure.
6430 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6431 * out-of-sync. Make sure to update the required fields
6432 * before using them.
6433 *
6434 * @remarks No-long-jump zone!!!
6435 */
6436static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6437{
6438 int rc = VINF_SUCCESS;
6439
6440 /* Guest LDTR. */
6441 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR))
6442 {
6443 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
6444 AssertRCReturn(rc, rc);
6445 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR);
6446 }
6447
6448 /* Guest GDTR. */
6449 uint64_t u64Val = 0;
6450 uint32_t u32Val = 0;
6451 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR))
6452 {
6453 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
6454 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6455 pMixedCtx->gdtr.pGdt = u64Val;
6456 pMixedCtx->gdtr.cbGdt = u32Val;
6457 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR);
6458 }
6459
6460 /* Guest IDTR. */
6461 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR))
6462 {
6463 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
6464 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
6465 pMixedCtx->idtr.pIdt = u64Val;
6466 pMixedCtx->idtr.cbIdt = u32Val;
6467 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR);
6468 }
6469
6470 /* Guest TR. */
6471 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR))
6472 {
6473 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6474 AssertRCReturn(rc, rc);
6475
6476 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
6477 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6478 {
6479 rc = VMXLOCAL_READ_SEG(TR, tr);
6480 AssertRCReturn(rc, rc);
6481 }
6482 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR);
6483 }
6484 return rc;
6485}
6486
6487#undef VMXLOCAL_READ_SEG
6488
6489
6490/**
6491 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
6492 * context.
6493 *
6494 * @returns VBox status code.
6495 * @param pVCpu The cross context virtual CPU structure.
6496 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6497 * out-of-sync. Make sure to update the required fields
6498 * before using them.
6499 *
6500 * @remarks No-long-jump zone!!!
6501 */
6502static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6503{
6504 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG))
6505 {
6506 if (!pVCpu->hm.s.fUsingHyperDR7)
6507 {
6508 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6509 uint32_t u32Val;
6510 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
6511 pMixedCtx->dr[7] = u32Val;
6512 }
6513
6514 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DEBUG);
6515 }
6516 return VINF_SUCCESS;
6517}
6518
6519
6520/**
6521 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
6522 *
6523 * @returns VBox status code.
6524 * @param pVCpu The cross context virtual CPU structure.
6525 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6526 * out-of-sync. Make sure to update the required fields
6527 * before using them.
6528 *
6529 * @remarks No-long-jump zone!!!
6530 */
6531static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6532{
6533 NOREF(pMixedCtx);
6534
6535 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
6536 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE);
6537 return VINF_SUCCESS;
6538}
6539
6540
6541/**
6542 * Saves the entire guest state from the currently active VMCS into the
6543 * guest-CPU context.
6544 *
6545 * This essentially VMREADs all guest-data.
6546 *
6547 * @returns VBox status code.
6548 * @param pVCpu The cross context virtual CPU structure.
6549 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6550 * out-of-sync. Make sure to update the required fields
6551 * before using them.
6552 */
6553static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6554{
6555 Assert(pVCpu);
6556 Assert(pMixedCtx);
6557
6558 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
6559 return VINF_SUCCESS;
6560
6561 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
6562 again on the ring-3 callback path, there is no real need to. */
6563 if (VMMRZCallRing3IsEnabled(pVCpu))
6564 VMMR0LogFlushDisable(pVCpu);
6565 else
6566 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6567 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
6568
6569 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6570 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6571
6572 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6573 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6574
6575 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6576 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6577
6578 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6579 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6580
6581 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
6582 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6583
6584 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
6585 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6586
6587 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
6588 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6589
6590 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
6591 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6592
6593 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
6594 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6595
6596 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
6597 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
6598
6599 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
6600 ("Missed guest state bits while saving state; missing %RX32 (got %RX32, want %RX32) - check log for any previous errors!\n",
6601 HMVMX_UPDATED_GUEST_ALL ^ HMVMXCPU_GST_VALUE(pVCpu), HMVMXCPU_GST_VALUE(pVCpu), HMVMX_UPDATED_GUEST_ALL));
6602
6603 if (VMMRZCallRing3IsEnabled(pVCpu))
6604 VMMR0LogFlushEnable(pVCpu);
6605
6606 return VINF_SUCCESS;
6607}
6608
6609
6610/**
6611 * Saves basic guest registers needed for IEM instruction execution.
6612 *
6613 * @returns VBox status code (OR-able).
6614 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
6615 * @param pMixedCtx Pointer to the CPU context of the guest.
6616 * @param fMemory Whether the instruction being executed operates on
6617 * memory or not. Only CR0 is synced up if clear.
6618 * @param fNeedRsp Need RSP (any instruction working on GPRs or stack).
6619 */
6620static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
6621{
6622 /*
6623 * We assume all general purpose registers other than RSP are available.
6624 *
6625 * RIP is a must, as it will be incremented or otherwise changed.
6626 *
6627 * RFLAGS are always required to figure the CPL.
6628 *
6629 * RSP isn't always required; however, it's a GPR, so it is frequently required.
6630 *
6631 * SS and CS are the only segment registers needed if IEM doesn't do memory
6632 * access (CPL + 16/32/64-bit mode), but we can only get all segment registers.
6633 *
6634 * CR0 is always required by IEM for the CPL, while CR3 and CR4 will only
6635 * be required for memory accesses.
6636 *
6637 * Note! Before IEM dispatches an exception, it will call us to sync in everything.
6638 */
6639 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
6640 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6641 if (fNeedRsp)
6642 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
6643 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6644 if (!fMemory)
6645 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6646 else
6647 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6648 AssertRCReturn(rc, rc);
6649 return rc;
6650}
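/* Editorial usage sketch, not part of the original source: a VM-exit handler that hands a
   memory-touching instruction to the interpreter would typically sync state first, roughly:

       int rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, true, true);    (fMemory, fNeedRsp)
       AssertRCReturn(rc, rc);
       VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

   IEMExecOne() is shown only for illustration here. */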
6651
6652
6653/**
6654 * Ensures that we've got a complete basic guest-context.
6655 *
6656 * This excludes the FPU, SSE, AVX, and similar extended state. The interface
6657 * is for the interpreter.
6658 *
6659 * @returns VBox status code.
6660 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
6661 * @param pMixedCtx Pointer to the guest-CPU context which may have data
6662 * needing to be synced in.
6663 * @thread EMT(pVCpu)
6664 */
6665VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6666{
6667 /* Note! Since this is only applicable to VT-x, the implementation is placed
6668 in the VT-x part of the sources instead of the generic stuff. */
6669 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
6670 return hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6671 return VINF_SUCCESS;
6672}
6673
6674
6675/**
6676 * Check per-VM and per-VCPU force flag actions that require us to go back to
6677 * ring-3 for one reason or another.
6678 *
6679 * @returns Strict VBox status code (i.e. informational status codes too)
6680 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6681 * ring-3.
6682 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6683 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6684 * interrupts)
6685 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6686 * all EMTs to be in ring-3.
6687 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6688 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
6689 * to the EM loop.
6690 *
6691 * @param pVM The cross context VM structure.
6692 * @param pVCpu The cross context virtual CPU structure.
6693 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6694 * out-of-sync. Make sure to update the required fields
6695 * before using them.
6696 * @param fStepping Running in hmR0VmxRunGuestCodeStep().
6697 */
6698static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
6699{
6700 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6701
6702 /*
6703 * Anything pending? Should be more likely than not if we're doing a good job.
6704 */
6705 if ( !fStepping
6706 ? !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_MASK)
6707 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
6708 : !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
6709 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6710 return VINF_SUCCESS;
6711
6712 /* We need the control registers now, make sure the guest-CPU context is updated. */
6713 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
6714 AssertRCReturn(rc3, rc3);
6715
6716 /* Pending HM CR3 sync. */
6717 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6718 {
6719 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
6720 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
6721 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
6722 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6723 }
6724
6725 /* Pending HM PAE PDPEs. */
6726 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6727 {
6728 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6729 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6730 }
6731
6732 /* Pending PGM CR3 sync. */
6733 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6734 {
6735 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6736 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6737 if (rcStrict2 != VINF_SUCCESS)
6738 {
6739 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
6740 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
6741 return rcStrict2;
6742 }
6743 }
6744
6745 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6746 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6747 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6748 {
6749 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6750 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6751 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6752 return rc2;
6753 }
6754
6755 /* Pending VM request packets, such as hardware interrupts. */
6756 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6757 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6758 {
6759 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
6760 return VINF_EM_PENDING_REQUEST;
6761 }
6762
6763 /* Pending PGM pool flushes. */
6764 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6765 {
6766 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
6767 return VINF_PGM_POOL_FLUSH_PENDING;
6768 }
6769
6770 /* Pending DMA requests. */
6771 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6772 {
6773 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
6774 return VINF_EM_RAW_TO_R3;
6775 }
6776
6777 return VINF_SUCCESS;
6778}
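/* Editorial usage sketch, not part of the original source: the pre-run path presumably consumes the
   return codes documented above along these lines:

       VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx, false);   (fStepping=false)
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;    back out to ring-3 or let EM handle the informational status
*/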
6779
6780
6781/**
6782 * Converts any TRPM trap into a pending HM event. This is typically used when
6783 * entering from ring-3 (not longjmp returns).
6784 *
6785 * @param pVCpu The cross context virtual CPU structure.
6786 */
6787static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6788{
6789 Assert(TRPMHasTrap(pVCpu));
6790 Assert(!pVCpu->hm.s.Event.fPending);
6791
6792 uint8_t uVector;
6793 TRPMEVENT enmTrpmEvent;
6794 RTGCUINT uErrCode;
6795 RTGCUINTPTR GCPtrFaultAddress;
6796 uint8_t cbInstr;
6797
6798 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6799 AssertRC(rc);
6800
6801 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6802 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
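 /*
  * Editorial note, not part of the original source: the interruption-info word built here packs
  * the vector into bits 0-7, the type into bits 8-10, the error-code-valid flag into bit 11 and
  * the valid flag into bit 31. E.g. a TRPM_TRAP for #GP (vector 13, hardware-exception type,
  * error code valid) comes out of the switch below as 0x80000B0D.
  */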
6803 if (enmTrpmEvent == TRPM_TRAP)
6804 {
6805 switch (uVector)
6806 {
6807 case X86_XCPT_NMI:
6808 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6809 break;
6810
6811 case X86_XCPT_BP:
6812 case X86_XCPT_OF:
6813 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6814 break;
6815
6816 case X86_XCPT_PF:
6817 case X86_XCPT_DF:
6818 case X86_XCPT_TS:
6819 case X86_XCPT_NP:
6820 case X86_XCPT_SS:
6821 case X86_XCPT_GP:
6822 case X86_XCPT_AC:
6823 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6824 /* no break! */
6825 default:
6826 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6827 break;
6828 }
6829 }
6830 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6831 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6832 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6833 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6834 else
6835 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6836
6837 rc = TRPMResetTrap(pVCpu);
6838 AssertRC(rc);
6839 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6840 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6841
6842 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6843 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
6844}
6845
6846
6847/**
6848 * Converts the pending HM event into a TRPM trap.
6849 *
6850 * @param pVCpu The cross context virtual CPU structure.
6851 */
6852static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6853{
6854 Assert(pVCpu->hm.s.Event.fPending);
6855
6856 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
6857 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
6858 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
6859 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6860
6861 /* If a trap was already pending, we did something wrong! */
6862 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6863
6864 TRPMEVENT enmTrapType;
6865 switch (uVectorType)
6866 {
6867 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6868 enmTrapType = TRPM_HARDWARE_INT;
6869 break;
6870
6871 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6872 enmTrapType = TRPM_SOFTWARE_INT;
6873 break;
6874
6875 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6876 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6877 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6878 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6879 enmTrapType = TRPM_TRAP;
6880 break;
6881
6882 default:
6883 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6884 enmTrapType = TRPM_32BIT_HACK;
6885 break;
6886 }
6887
6888 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6889
6890 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6891 AssertRC(rc);
6892
6893 if (fErrorCodeValid)
6894 TRPMSetErrorCode(pVCpu, uErrorCode);
6895
6896 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6897 && uVector == X86_XCPT_PF)
6898 {
6899 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6900 }
6901 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6902 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6903 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6904 {
6905 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6906 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6907 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6908 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6909 }
6910
6911 /* Clear any pending events from the VMCS. */
6912 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
6913 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0); AssertRC(rc);
6914
6915 /* We're now done converting the pending event. */
6916 pVCpu->hm.s.Event.fPending = false;
6917}
6918
6919
6920/**
6921 * Does the necessary state syncing before returning to ring-3 for any reason
6922 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6923 *
6924 * @returns VBox status code.
6925 * @param pVM The cross context VM structure.
6926 * @param pVCpu The cross context virtual CPU structure.
6927 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6928 * be out-of-sync. Make sure to update the required
6929 * fields before using them.
6930 * @param fSaveGuestState Whether to save the guest state or not.
6931 *
6932 * @remarks No-long-jmp zone!!!
6933 */
6934static int hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)
6935{
6936 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6937 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6938
6939 RTCPUID idCpu = RTMpCpuId();
6940 Log4Func(("HostCpuId=%u\n", idCpu));
6941
6942 /*
6943 * !!! IMPORTANT !!!
6944 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
6945 */
6946
6947 /* Save the guest state if necessary. */
6948 if ( fSaveGuestState
6949 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL)
6950 {
6951 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6952 AssertRCReturn(rc, rc);
6953 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
6954 }
6955
6956 /* Restore host FPU state if necessary and resync on next R0 reentry. */
6957 if (CPUMIsGuestFPUStateActive(pVCpu))
6958 {
6959 /* We shouldn't reload CR0 without saving it first. */
6960 if (!fSaveGuestState)
6961 {
6962 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6963 AssertRCReturn(rc, rc);
6964 }
6965 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
6966 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6967 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
6968 }
6969
6970 /* Restore host debug registers if necessary and resync on next R0 reentry. */
6971#ifdef VBOX_STRICT
6972 if (CPUMIsHyperDebugStateActive(pVCpu))
6973 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6974#endif
6975 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
6976 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
6977 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
6978 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
6979
6980#if HC_ARCH_BITS == 64
6981 /* Restore host-state bits that VT-x only restores partially. */
6982 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
6983 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
6984 {
6985 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
6986 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6987 }
6988 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6989#endif
6990
6991#if HC_ARCH_BITS == 64
6992 /* Restore the lazy host MSRs as we're leaving VT-x context. */
6993 if ( pVM->hm.s.fAllow64BitGuests
6994 && pVCpu->hm.s.vmx.fLazyMsrs)
6995 {
6996 /* We shouldn't reload the guest MSRs without saving it first. */
6997 if (!fSaveGuestState)
6998 {
6999 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
7000 AssertRCReturn(rc, rc);
7001 }
7002 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));
7003 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7004 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
7005 }
7006#endif
7007
7008 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7009 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7010
7011 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
7012 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
7013 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
7014 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
7015 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
7016 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
7017 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
7018 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7019
7020 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7021
7022 /** @todo This partially defeats the purpose of having preemption hooks.
7023 * The problem is that deregistering the hooks should be done at a point that
7024 * lasts until the EMT is about to be destroyed, not every time we leave HM
7025 * context.
7026 */
7027 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7028 {
7029 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7030 AssertRCReturn(rc, rc);
7031
7032 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7033 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
7034 }
7035 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
7036 NOREF(idCpu);
7037
7038 return VINF_SUCCESS;
7039}
7040
7041
7042/**
7043 * Leaves the VT-x session.
7044 *
7045 * @returns VBox status code.
7046 * @param pVM The cross context VM structure.
7047 * @param pVCpu The cross context virtual CPU structure.
7048 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7049 * out-of-sync. Make sure to update the required fields
7050 * before using them.
7051 *
7052 * @remarks No-long-jmp zone!!!
7053 */
7054DECLINLINE(int) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7055{
7056 HM_DISABLE_PREEMPT();
7057 HMVMX_ASSERT_CPU_SAFE();
7058 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7059 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7060
7061 /* When thread-context hooks are used, we can avoid doing the leave again if we were preempted
7062 earlier and already did this from the VMXR0ThreadCtxCallback(). */
7063 if (!pVCpu->hm.s.fLeaveDone)
7064 {
7065 int rc2 = hmR0VmxLeave(pVM, pVCpu, pMixedCtx, true /* fSaveGuestState */);
7066 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
7067 pVCpu->hm.s.fLeaveDone = true;
7068 }
7069 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
7070
7071 /*
7072 * !!! IMPORTANT !!!
7073 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
7074 */
7075
7076 /* Deregister hook now that we've left HM context before re-enabling preemption. */
7077 /** @todo Deregistering here means we need to VMCLEAR always
7078 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
7079 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7080 VMMR0ThreadCtxHookDisable(pVCpu);
7081
7082 /* Leave HM context. This takes care of local init (term). */
7083 int rc = HMR0LeaveCpu(pVCpu);
7084
7085 HM_RESTORE_PREEMPT();
7086 return rc;
7087}
7088
7089
7090/**
7091 * Does the necessary state syncing before doing a longjmp to ring-3.
7092 *
7093 * @returns VBox status code.
7094 * @param pVM The cross context VM structure.
7095 * @param pVCpu The cross context virtual CPU structure.
7096 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7097 * out-of-sync. Make sure to update the required fields
7098 * before using them.
7099 *
7100 * @remarks No-long-jmp zone!!!
7101 */
7102DECLINLINE(int) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7103{
7104 return hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7105}
7106
7107
7108/**
7109 * Take necessary actions before going back to ring-3.
7110 *
7111 * An action requires us to go back to ring-3. This function does the necessary
7112 * steps before we can safely return to ring-3. This is not the same as longjmps
7113 * to ring-3; this is voluntary and prepares the guest so it may continue
7114 * executing outside HM (recompiler/IEM).
7115 *
7116 * @returns VBox status code.
7117 * @param pVM The cross context VM structure.
7118 * @param pVCpu The cross context virtual CPU structure.
7119 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7120 * out-of-sync. Make sure to update the required fields
7121 * before using them.
7122 * @param rcExit The reason for exiting to ring-3. Can be
7123 * VINF_VMM_UNKNOWN_RING3_CALL.
7124 */
7125static int hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit)
7126{
7127 Assert(pVM);
7128 Assert(pVCpu);
7129 Assert(pMixedCtx);
7130 HMVMX_ASSERT_PREEMPT_SAFE();
7131
7132 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7133 {
7134 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7135 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7136 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7137 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7138 }
7139
7140 /* Please, no longjumps here (any logging shouldn't flush and thereby jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7141 VMMRZCallRing3Disable(pVCpu);
7142 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcExit)));
7143
7144 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7145 if (pVCpu->hm.s.Event.fPending)
7146 {
7147 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7148 Assert(!pVCpu->hm.s.Event.fPending);
7149 }
7150
7151 /* Clear interrupt-window and NMI-window controls as we re-evaluate it when we return from ring-3. */
7152 hmR0VmxClearIntNmiWindowsVmcs(pVCpu);
7153
7154 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
7155 and if we're injecting an event we should have a TRPM trap pending. */
7156 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7157#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
7158 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7159#endif
7160
7161 /* Save guest state and restore host state bits. */
7162 int rc = hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
7163 AssertRCReturn(rc, rc);
7164 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7165 /* Thread-context hooks are unregistered at this point!!! */
7166
7167 /* Sync recompiler state. */
7168 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7169 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7170 | CPUM_CHANGED_LDTR
7171 | CPUM_CHANGED_GDTR
7172 | CPUM_CHANGED_IDTR
7173 | CPUM_CHANGED_TR
7174 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7175 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
7176 if ( pVM->hm.s.fNestedPaging
7177 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7178 {
7179 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7180 }
7181
7182 Assert(!pVCpu->hm.s.fClearTrapFlag);
7183
7184 /* On our way back from ring-3, reload the guest state if there is a possibility of it having been changed. */
7185 if (rcExit != VINF_EM_RAW_INTERRUPT)
7186 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
7187
7188 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7189
7190 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7191 VMMRZCallRing3RemoveNotification(pVCpu);
7192 VMMRZCallRing3Enable(pVCpu);
7193
7194 return rc;
7195}
7196
7197
7198/**
7199 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7200 * longjump to ring-3 and possibly get preempted.
7201 *
7202 * @returns VBox status code.
7203 * @param pVCpu The cross context virtual CPU structure.
7204 * @param enmOperation The operation causing the ring-3 longjump.
7205 * @param pvUser Opaque pointer to the guest-CPU context. The data
7206 * may be out-of-sync. Make sure to update the required
7207 * fields before using them.
7208 */
7209static DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7210{
7211 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7212 {
7213 /*
7214 * !!! IMPORTANT !!!
7215 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
7216 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
7217 */
7218 VMMRZCallRing3RemoveNotification(pVCpu);
7219 VMMRZCallRing3Disable(pVCpu);
7220 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
7221 RTThreadPreemptDisable(&PreemptState);
7222
7223 PVM pVM = pVCpu->CTX_SUFF(pVM);
7224 if (CPUMIsGuestFPUStateActive(pVCpu))
7225 CPUMR0SaveGuestFPU(pVM, pVCpu, (PCPUMCTX)pvUser);
7226
7227 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7228
7229#if HC_ARCH_BITS == 64
7230 /* Restore host-state bits that VT-x only restores partially. */
7231 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7232 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7233 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7234 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7235
7236 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7237 if ( pVM->hm.s.fAllow64BitGuests
7238 && pVCpu->hm.s.vmx.fLazyMsrs)
7239 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7240#endif
7241 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7242 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7243 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7244 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7245 {
7246 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7247 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7248 }
7249
7250 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7251 VMMR0ThreadCtxHookDisable(pVCpu);
7252 HMR0LeaveCpu(pVCpu);
7253 RTThreadPreemptRestore(&PreemptState);
7254 return VINF_SUCCESS;
7255 }
7256
7257 Assert(pVCpu);
7258 Assert(pvUser);
7259 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7260 HMVMX_ASSERT_PREEMPT_SAFE();
7261
7262 VMMRZCallRing3Disable(pVCpu);
7263 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7264
7265 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu,
7266 enmOperation));
7267
7268 int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
7269 AssertRCReturn(rc, rc);
7270
7271 VMMRZCallRing3Enable(pVCpu);
7272 return VINF_SUCCESS;
7273}
7274
7275
7276/**
7277 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7278 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7279 *
7280 * @param pVCpu The cross context virtual CPU structure.
7281 */
7282DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7283{
7284 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7285 {
7286 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7287 {
7288 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7289 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7290 AssertRC(rc);
7291 Log4(("Setup interrupt-window exiting\n"));
7292 }
7293 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7294}
7295
7296
7297/**
7298 * Clears the interrupt-window exiting control in the VMCS.
7299 *
7300 * @param pVCpu The cross context virtual CPU structure.
7301 */
7302DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7303{
7304 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7305 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7306 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7307 AssertRC(rc);
7308 Log4(("Cleared interrupt-window exiting\n"));
7309}
7310
7311
7312/**
7313 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7314 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7315 *
7316 * @param pVCpu The cross context virtual CPU structure.
7317 */
7318DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7319{
7320 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7321 {
7322 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7323 {
7324 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7325 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7326 AssertRC(rc);
7327 Log4(("Setup NMI-window exiting\n"));
7328 }
7329 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7330}
7331
7332
7333/**
7334 * Clears the NMI-window exiting control in the VMCS.
7335 *
7336 * @param pVCpu The cross context virtual CPU structure.
7337 */
7338DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7339{
7340 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7341 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7342 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7343 AssertRC(rc);
7344 Log4(("Cleared NMI-window exiting\n"));
7345}
7346
7347
7348/**
7349 * Evaluates the event to be delivered to the guest and sets it as the pending
7350 * event.
7351 *
7352 * @param pVCpu The cross context virtual CPU structure.
7353 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7354 * out-of-sync. Make sure to update the required fields
7355 * before using them.
7356 */
7357static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7358{
7359 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7360 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7361 bool const fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7362 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7363 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
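    /* Note: the guest interruptibility-state field encodes these as individual bits (Intel spec. 24.4.2
       "Guest Non-Register State"): bit 0 = blocking by STI, bit 1 = blocking by MOV SS, bit 2 = blocking
       by SMI, bit 3 = blocking by NMI. */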
7364
7365 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7366 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
7367 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7368 Assert(!TRPMHasTrap(pVCpu));
7369
7370 /*
7371 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7372 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7373 */
7374 /** @todo SMI. SMIs take priority over NMIs. */
7375 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7376 {
7377 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7378 if ( !pVCpu->hm.s.Event.fPending
7379 && !fBlockNmi
7380 && !fBlockSti
7381 && !fBlockMovSS)
7382 {
7383 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
7384 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7385 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7386
7387 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7388 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7389 }
7390 else
7391 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7392 }
7393 /*
7394 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
7395 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt.
7396 */
7397 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7398 && !pVCpu->hm.s.fSingleInstruction)
7399 {
7400 Assert(!DBGFIsStepping(pVCpu));
7401 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7402 AssertRC(rc);
7403 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7404 if ( !pVCpu->hm.s.Event.fPending
7405 && !fBlockInt
7406 && !fBlockSti
7407 && !fBlockMovSS)
7408 {
7409 uint8_t u8Interrupt;
7410 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7411 if (RT_SUCCESS(rc))
7412 {
7413 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
7414 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
7415 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7416
7417 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7418 }
7419 else
7420 {
7421 /** @todo Does this actually happen? If not turn it into an assertion. */
7422 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
7423 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7424 }
7425 }
7426 else
7427 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7428 }
7429}
7430
7431
7432/**
7433 * Sets a pending-debug exception in the VMCS, to be delivered to the guest if
7434 * the guest is single-stepping.
7435 *
7436 * @param pVCpu The cross context virtual CPU structure.
7437 */
7438DECLINLINE(void) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu)
7439{
7440 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)); NOREF(pVCpu);
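    /* BS is bit 14 of the pending debug exceptions field (mirroring DR6.BS); setting it makes the CPU
       deliver a single-step #DB in the guest after VM-entry, just as if the guest had executed an
       instruction with EFLAGS.TF set. */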
7441 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7442 AssertRC(rc);
7443}
7444
7445
7446/**
7447 * Injects any pending events into the guest if the guest is in a state to
7448 * receive them.
7449 *
7450 * @returns Strict VBox status code (i.e. informational status codes too).
7451 * @param pVCpu The cross context virtual CPU structure.
7452 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7453 * out-of-sync. Make sure to update the required fields
7454 * before using them.
7455 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should
7456 * return VINF_EM_DBG_STEPPED if the event was
7457 * dispatched directly.
7458 */
7459static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
7460{
7461 HMVMX_ASSERT_PREEMPT_SAFE();
7462 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7463
7464 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7465 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7466 bool fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7467 bool fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7468
7469 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));
7470 Assert(!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
7471 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7472 Assert(!TRPMHasTrap(pVCpu));
7473
7474 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
7475 if (pVCpu->hm.s.Event.fPending)
7476 {
7477 /*
7478 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
7479 * pending even while injecting an event and in this case, we want a VM-exit as soon as
7480 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
7481 *
7482 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
7483 */
7484 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7485#ifdef VBOX_STRICT
7486 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7487 {
7488 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7489 Assert(!fBlockInt);
7490 Assert(!fBlockSti);
7491 Assert(!fBlockMovSS);
7492 }
7493 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7494 {
7495 bool const fBlockNmi = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7496 Assert(!fBlockSti);
7497 Assert(!fBlockMovSS);
7498 Assert(!fBlockNmi);
7499 }
7500#endif
7501 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7502 (uint8_t)uIntType));
7503 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7504 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress,
7505 fStepping, &uIntrState);
7506 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
7507
7508 /* Update the interruptibility-state as it could have been changed by
7509 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7510 fBlockMovSS = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7511 fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7512
7513 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7514 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7515 else
7516 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7517 }
7518
7519 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7520 if ( fBlockSti
7521 || fBlockMovSS)
7522 {
7523 if (!pVCpu->hm.s.fSingleInstruction)
7524 {
7525 /*
7526 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7527 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7528 * See Intel spec. 27.3.4 "Saving Non-Register State".
7529 */
7530 Assert(!DBGFIsStepping(pVCpu));
7531 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7532 AssertRCReturn(rc2, rc2);
7533 if (pMixedCtx->eflags.Bits.u1TF)
7534 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
7535 }
7536 else if (pMixedCtx->eflags.Bits.u1TF)
7537 {
7538 /*
7539 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7540 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7541 */
7542 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7543 uIntrState = 0;
7544 }
7545 }
7546
7547 /*
7548 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7549 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7550 */
7551 int rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
7552 AssertRC(rc2);
7553
7554 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
7555 NOREF(fBlockMovSS); NOREF(fBlockSti);
7556 return rcStrict;
7557}
7558
7559
7560/**
7561 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
7562 *
7563 * @param pVCpu The cross context virtual CPU structure.
7564 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7565 * out-of-sync. Make sure to update the required fields
7566 * before using them.
7567 */
7568DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7569{
7570 NOREF(pMixedCtx);
7571 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); /* #UD is a hardware exception; set the type like the other hmR0VmxSetPendingXcpt* helpers do. */
7572 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7573}
7574
7575
7576/**
7577 * Injects a double-fault (\#DF) exception into the VM.
7578 *
7579 * @returns Strict VBox status code (i.e. informational status codes too).
7580 * @param pVCpu The cross context virtual CPU structure.
7581 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7582 * out-of-sync. Make sure to update the required fields
7583 * before using them.
7584 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7585 * and should return VINF_EM_DBG_STEPPED if the event
7586 * is injected directly (register modified by us, not
7587 * by hardware on VM-entry).
7588 * @param puIntrState Pointer to the current guest interruptibility-state.
7589 * This interruptibility-state will be updated if
7590 * necessary. This cannot be NULL.
7591 */
7592DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState)
7593{
7594 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7595 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7596 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
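    /* The error code pushed for a double fault (#DF) is architecturally always zero, hence the 0 passed below. */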
7597 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
7598 fStepping, puIntrState);
7599}
7600
7601
7602/**
7603 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
7604 *
7605 * @param pVCpu The cross context virtual CPU structure.
7606 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7607 * out-of-sync. Make sure to update the required fields
7608 * before using them.
7609 */
7610DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7611{
7612 NOREF(pMixedCtx);
7613 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7614 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7615 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7616}
7617
7618
7619/**
7620 * Sets an overflow (\#OF) exception as pending-for-injection into the VM.
7621 *
7622 * @param pVCpu The cross context virtual CPU structure.
7623 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7624 * out-of-sync. Make sure to update the required fields
7625 * before using them.
7626 * @param cbInstr The instruction length in bytes; used to compute the
7627 * return RIP that is pushed on the guest stack.
7628 */
7629DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7630{
7631 NOREF(pMixedCtx);
7632 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7633 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7634 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7635}
7636
7637
7638/**
7639 * Injects a general-protection (\#GP) fault into the VM.
7640 *
7641 * @returns Strict VBox status code (i.e. informational status codes too).
7642 * @param pVCpu The cross context virtual CPU structure.
7643 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7644 * out-of-sync. Make sure to update the required fields
7645 * before using them.
7646 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7647 * mode, i.e. in real-mode it's not valid).
7648 * @param u32ErrorCode The error code associated with the \#GP.
7649 * @param fStepping Whether we're running in
7650 * hmR0VmxRunGuestCodeStep() and should return
7651 * VINF_EM_DBG_STEPPED if the event is injected
7652 * directly (register modified by us, not by
7653 * hardware on VM-entry).
7654 * @param puIntrState Pointer to the current guest interruptibility-state.
7655 * This interruptibility-state will be updated if
7656 * necessary. This cannot be NULL.
7657 */
7658DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7659 bool fStepping, uint32_t *puIntrState)
7660{
7661 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7662 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7663 if (fErrorCodeValid)
7664 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7665 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
7666 fStepping, puIntrState);
7667}
7668
7669
7670/**
7671 * Sets a general-protection (\#GP) exception as pending-for-injection into the
7672 * VM.
7673 *
7674 * @param pVCpu The cross context virtual CPU structure.
7675 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7676 * out-of-sync. Make sure to update the required fields
7677 * before using them.
7678 * @param u32ErrorCode The error code associated with the \#GP.
7679 */
7680DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7681{
7682 NOREF(pMixedCtx);
7683 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7684 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7685 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7686 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7687}
7688
7689
7690/**
7691 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7692 *
7693 * @param pVCpu The cross context virtual CPU structure.
7694 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7695 * out-of-sync. Make sure to update the required fields
7696 * before using them.
7697 * @param uVector The software interrupt vector number.
7698 * @param cbInstr The instruction length in bytes; used to compute the
7699 * return RIP that is pushed on the guest stack.
7700 */
7701DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7702{
7703 NOREF(pMixedCtx);
7704 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
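    /* VT-x classifies INT3 (#BP) and INTO (#OF) as software exceptions (type 6) and all other INT n
       instructions as software interrupts (type 4); both use the VM-entry instruction length to compute
       the return RIP pushed on the guest stack. */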
7705 if ( uVector == X86_XCPT_BP
7706 || uVector == X86_XCPT_OF)
7707 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7708 else
7709 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7710 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7711}
7712
7713
7714/**
7715 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7716 * stack.
7717 *
7718 * @returns Strict VBox status code (i.e. informational status codes too).
7719 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7720 * @param pVM The cross context VM structure.
7721 * @param pMixedCtx Pointer to the guest-CPU context.
7722 * @param uValue The value to push to the guest stack.
7723 */
7724DECLINLINE(VBOXSTRICTRC) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7725{
7726 /*
7727 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7728 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7729 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7730 */
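    /* A 16-bit push with SP=1 would wrap around the stack segment in the middle of the write; per the
       spec references above this is treated as a fatal condition, which we surface as VINF_EM_RESET. */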
7731 if (pMixedCtx->sp == 1)
7732 return VINF_EM_RESET;
7733 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7734 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7735 AssertRC(rc);
7736 return rc;
7737}
7738
7739
7740/**
7741 * Injects an event into the guest upon VM-entry by updating the relevant fields
7742 * in the VM-entry area in the VMCS.
7743 *
7744 * @returns Strict VBox status code (i.e. informational status codes too).
7745 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7746 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7747 *
7748 * @param pVCpu The cross context virtual CPU structure.
7749 * @param pMixedCtx Pointer to the guest-CPU context. The data may
7750 * be out-of-sync. Make sure to update the required
7751 * fields before using them.
7752 * @param u64IntInfo The VM-entry interruption-information field.
7753 * @param cbInstr The VM-entry instruction length in bytes (for
7754 * software interrupts, exceptions and privileged
7755 * software exceptions).
7756 * @param u32ErrCode The VM-entry exception error code.
7757 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions.
7758 * @param puIntrState Pointer to the current guest interruptibility-state.
7759 * This interruptibility-state will be updated if
7760 * necessary. This cannot be NULL.
7761 * @param fStepping Whether we're running in
7762 * hmR0VmxRunGuestCodeStep() and should return
7763 * VINF_EM_DBG_STEPPED if the event is injected
7764 * directly (register modified by us, not by
7765 * hardware on VM-entry).
7766 *
7767 * @remarks Requires CR0!
7768 * @remarks No-long-jump zone!!!
7769 */
7770static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
7771 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping,
7772 uint32_t *puIntrState)
7773{
7774 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7775 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo));
7776 Assert(puIntrState);
7777 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7778
7779 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7780 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
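    /* Interruption-information layout (Intel spec. 24.8.3): bits 7:0 = vector, bits 10:8 = type,
       bit 11 = deliver error code, bits 30:12 = reserved (MBZ), bit 31 = valid. */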
7781
7782#ifdef VBOX_STRICT
7783 /* Validate the error-code-valid bit for hardware exceptions. */
7784 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
7785 {
7786 switch (uVector)
7787 {
7788 case X86_XCPT_PF:
7789 case X86_XCPT_DF:
7790 case X86_XCPT_TS:
7791 case X86_XCPT_NP:
7792 case X86_XCPT_SS:
7793 case X86_XCPT_GP:
7794 case X86_XCPT_AC:
7795 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7796 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7797 /* fallthru */
7798 default:
7799 break;
7800 }
7801 }
7802#endif
7803
7804 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7805 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7806 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7807
7808 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7809
7810 /* We require CR0 to check if the guest is in real-mode. */
7811 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7812 AssertRCReturn(rc, rc);
7813
7814 /*
7815 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
7816 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
7817 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
7818 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7819 */
7820 if (CPUMIsGuestInRealModeEx(pMixedCtx))
7821 {
7822 PVM pVM = pVCpu->CTX_SUFF(pVM);
7823 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
7824 {
7825 Assert(PDMVmmDevHeapIsEnabled(pVM));
7826 Assert(pVM->hm.s.vmx.pRealModeTSS);
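    /* Outline of what follows, mimicking what the CPU itself does for real-mode event delivery:
       validate the vector against the IVT limit, read the 4-byte IVT entry, push FLAGS, CS and the
       return IP on the guest stack, then clear IF/TF/RF/AC and load CS:IP from the IVT entry. */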
7827
7828 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
7829 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7830 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7831 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7832 AssertRCReturn(rc, rc);
7833 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));
7834
7835 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7836 size_t const cbIdtEntry = sizeof(X86IDTR16);
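    /* E.g. vector 8 with 4-byte IVT entries occupies bytes 32..35, so the IDT limit must be at least 35. */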
7837 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7838 {
7839 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7840 if (uVector == X86_XCPT_DF)
7841 return VINF_EM_RESET;
7842
7843 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7844 if (uVector == X86_XCPT_GP)
7845 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, puIntrState);
7846
7847 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
7848 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
7849 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */,
7850 fStepping, puIntrState);
7851 }
7852
7853 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7854 uint16_t uGuestIp = pMixedCtx->ip;
7855 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
7856 {
7857 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7858 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
7859 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7860 }
7861 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
7862 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7863
7864 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7865 X86IDTR16 IdtEntry;
7866 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
7867 rc = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7868 AssertRCReturn(rc, rc);
7869
7870 /* Construct the stack frame for the interrupt/exception handler. */
7871 VBOXSTRICTRC rcStrict;
7872 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
7873 if (rcStrict == VINF_SUCCESS)
7874 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
7875 if (rcStrict == VINF_SUCCESS)
7876 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
7877
7878 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7879 if (rcStrict == VINF_SUCCESS)
7880 {
7881 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7882 pMixedCtx->rip = IdtEntry.offSel;
7883 pMixedCtx->cs.Sel = IdtEntry.uSel;
7884 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
7885 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
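    /* Real-mode segment base = selector * 16; cbIdtEntry is 4 (sizeof(X86IDTR16)), so the shift above is
       equivalent to << 4. */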
7886 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7887 && uVector == X86_XCPT_PF)
7888 pMixedCtx->cr2 = GCPtrFaultAddress;
7889
7890 /* If any other guest-state bits are changed here, make sure to update
7891 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7892 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS
7893 | HM_CHANGED_GUEST_RIP
7894 | HM_CHANGED_GUEST_RFLAGS
7895 | HM_CHANGED_GUEST_RSP);
7896
7897 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7898 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7899 {
7900 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7901 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7902 Log4(("Clearing inhibition due to STI.\n"));
7903 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
7904 }
7905 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
7906 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
7907
7908 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
7909 it if we are returning to ring-3 before executing guest code. */
7910 pVCpu->hm.s.Event.fPending = false;
7911
7912 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */
7913 if (fStepping)
7914 rcStrict = VINF_EM_DBG_STEPPED;
7915 }
7916 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
7917 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7918 return rcStrict;
7919 }
7920
7921 /*
7922 * When unrestricted guest execution is enabled and the guest is in real mode, we must not set the deliver-error-code bit.
7923 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7924 */
7925 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7926 }
7927
7928 /* Validate. */
7929 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
7930 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
7931 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
7932
7933 /* Inject. */
7934 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
7935 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
7936 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
7937 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
7938
7939 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7940 && uVector == X86_XCPT_PF)
7941 pMixedCtx->cr2 = GCPtrFaultAddress;
7942
7943 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
7944 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
7945
7946 AssertRCReturn(rc, rc);
7947 return VINF_SUCCESS;
7948}
7949
7950
7951/**
7952 * Clears the interrupt-window and NMI-window exiting controls in the VMCS if
7953 * they are currently set.
7954 *
7955 * @param pVCpu The cross context virtual CPU structure.
7956 *
7957 * @remarks Used when returning to ring-3 so that both window-exiting controls
7958 * are re-evaluated before the next VM-entry.
7960 * @remarks No-long-jump zone!!!
7961 */
7962static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
7963{
7964 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
7965
7966 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7967 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7968
7969 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
7970 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
7971}
7972
7973
7974/**
7975 * Enters the VT-x session.
7976 *
7977 * @returns VBox status code.
7978 * @param pVM The cross context VM structure.
7979 * @param pVCpu The cross context virtual CPU structure.
7980 * @param pCpu Pointer to the CPU info struct.
7981 */
7982VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
7983{
7984 AssertPtr(pVM);
7985 AssertPtr(pVCpu);
7986 Assert(pVM->hm.s.vmx.fSupported);
7987 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7988 NOREF(pCpu); NOREF(pVM);
7989
7990 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
7991 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
7992
7993#ifdef VBOX_STRICT
7994 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
7995 RTCCUINTREG uHostCR4 = ASMGetCR4();
7996 if (!(uHostCR4 & X86_CR4_VMXE))
7997 {
7998 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
7999 return VERR_VMX_X86_CR4_VMXE_CLEARED;
8000 }
8001#endif
8002
8003 /*
8004 * Load the VCPU's VMCS as the current (and active) one.
8005 */
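    /* VMXActivateVmcs performs VMPTRLD, making this VMCS the current and active one on this host CPU;
       uVmcsState tracks the clear/active state so we know when a VMCLEAR is required (e.g. before
       running on another CPU). */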
8006 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
8007 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8008 if (RT_FAILURE(rc))
8009 return rc;
8010
8011 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8012 pVCpu->hm.s.fLeaveDone = false;
8013 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8014
8015 return VINF_SUCCESS;
8016}
8017
8018
8019/**
8020 * The thread-context callback (only on platforms which support it).
8021 *
8022 * @param enmEvent The thread-context event.
8023 * @param pVCpu The cross context virtual CPU structure.
8024 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
8025 * @thread EMT(pVCpu)
8026 */
8027VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8028{
8029 NOREF(fGlobalInit);
8030
8031 switch (enmEvent)
8032 {
8033 case RTTHREADCTXEVENT_OUT:
8034 {
8035 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8036 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8037 VMCPU_ASSERT_EMT(pVCpu);
8038
8039 PVM pVM = pVCpu->CTX_SUFF(pVM);
8040 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
8041
8042 /* No longjmps (logger flushes, locks) in this fragile context. */
8043 VMMRZCallRing3Disable(pVCpu);
8044 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8045
8046 /*
8047 * Restore host-state (FPU, debug etc.)
8048 */
8049 if (!pVCpu->hm.s.fLeaveDone)
8050 {
8051 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are
8052 holding the PGM lock while saving the guest state (see hmR0VmxSaveGuestControlRegs()). */
8053 hmR0VmxLeave(pVM, pVCpu, pMixedCtx, false /* fSaveGuestState */);
8054 pVCpu->hm.s.fLeaveDone = true;
8055 }
8056
8057 /* Leave HM context, takes care of local init (term). */
8058 int rc = HMR0LeaveCpu(pVCpu);
8059 AssertRC(rc); NOREF(rc);
8060
8061 /* Restore longjmp state. */
8062 VMMRZCallRing3Enable(pVCpu);
8063 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
8064 break;
8065 }
8066
8067 case RTTHREADCTXEVENT_IN:
8068 {
8069 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8070 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8071 VMCPU_ASSERT_EMT(pVCpu);
8072
8073 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8074 VMMRZCallRing3Disable(pVCpu);
8075 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8076
8077 /* Initialize the bare minimum state required for HM. This takes care of
8078 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8079 int rc = HMR0EnterCpu(pVCpu);
8080 AssertRC(rc);
8081 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
8082
8083 /* Load the active VMCS as the current one. */
8084 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8085 {
8086 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8087 AssertRC(rc); NOREF(rc);
8088 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8089 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8090 }
8091 pVCpu->hm.s.fLeaveDone = false;
8092
8093 /* Restore longjmp state. */
8094 VMMRZCallRing3Enable(pVCpu);
8095 break;
8096 }
8097
8098 default:
8099 break;
8100 }
8101}
8102
8103
8104/**
8105 * Saves the host state in the VMCS host-state.
8106 * Sets up the VM-exit MSR-load area.
8107 *
8108 * The CPU state will be loaded from these fields on every successful VM-exit.
8109 *
8110 * @returns VBox status code.
8111 * @param pVM The cross context VM structure.
8112 * @param pVCpu The cross context virtual CPU structure.
8113 *
8114 * @remarks No-long-jump zone!!!
8115 */
8116static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
8117{
8118 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8119
8120 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8121 return VINF_SUCCESS;
8122
8123 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
8124 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8125
8126 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
8127 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8128
8129 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
8130 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8131
8132 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
8133 return rc;
8134}
8135
8136
8137/**
8138 * Saves the host state in the VMCS host-state.
8139 *
8140 * @returns VBox status code.
8141 * @param pVM The cross context VM structure.
8142 * @param pVCpu The cross context virtual CPU structure.
8143 *
8144 * @remarks No-long-jump zone!!!
8145 */
8146VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
8147{
8148 AssertPtr(pVM);
8149 AssertPtr(pVCpu);
8150
8151 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8152
8153 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
8154 and have to resave the host state but most of the time we won't be, so do it here before we disable interrupts. */
8155 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8156 return hmR0VmxSaveHostState(pVM, pVCpu);
8157}
8158
8159
8160/**
8161 * Loads the guest state into the VMCS guest-state area.
8162 *
8163 * This will typically be done before VM-entry when the guest-CPU state and the
8164 * VMCS state may potentially be out of sync.
8165 *
8166 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
8167 * VM-entry controls.
8168 * Sets up the appropriate VMX non-root function to execute guest code based on
8169 * the guest CPU mode.
8170 *
8171 * @returns VBox status code.
8172 * @param pVM The cross context VM structure.
8173 * @param pVCpu The cross context virtual CPU structure.
8174 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8175 * out-of-sync. Make sure to update the required fields
8176 * before using them.
8177 *
8178 * @remarks No-long-jump zone!!!
8179 */
8180static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8181{
8182 AssertPtr(pVM);
8183 AssertPtr(pVCpu);
8184 AssertPtr(pMixedCtx);
8185 HMVMX_ASSERT_PREEMPT_SAFE();
8186
8187 VMMRZCallRing3Disable(pVCpu);
8188 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8189
8190 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
8191
8192 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
8193
8194 /* Determine real-on-v86 mode. */
8195 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8196 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
8197 && CPUMIsGuestInRealModeEx(pMixedCtx))
8198 {
8199 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8200 }
8201
8202 /*
8203 * Load the guest-state into the VMCS.
8204 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8205 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8206 */
8207 int rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
8208 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8209
8210 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8211 rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
8212 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8213
8214 /* This needs to be done after hmR0VmxSetupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8215 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
8216 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8217
8218 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
8219 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8220
8221 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
8222 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8223
8224 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */
8225 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
8226 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8227
8228 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
8229 determine we don't have to swap EFER after all. */
8230 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
8231 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8232
8233 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
8234 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8235
8236 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx);
8237 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8238
8239 /*
8240 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
8241 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
8242 */
8243 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
8244 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
8245
8246 /* Clear any unused and reserved bits. */
8247 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2);
8248
8249 VMMRZCallRing3Enable(pVCpu);
8250
8251 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
8252 return rc;
8253}
8254
8255
8256/**
8257 * Loads the state shared between the host and guest into the VMCS.
8258 *
8259 * @param pVM The cross context VM structure.
8260 * @param pVCpu The cross context virtual CPU structure.
8261 * @param pCtx Pointer to the guest-CPU context.
8262 *
8263 * @remarks No-long-jump zone!!!
8264 */
8265static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8266{
8267 NOREF(pVM);
8268
8269 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8270 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8271
8272 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
8273 {
8274 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
8275 AssertRC(rc);
8276 }
8277
8278 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG))
8279 {
8280 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
8281 AssertRC(rc);
8282
8283 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8284 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))
8285 {
8286 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
8287 AssertRC(rc);
8288 }
8289 }
8290
8291 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
8292 {
8293#if HC_ARCH_BITS == 64
8294 if (pVM->hm.s.fAllow64BitGuests)
8295 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8296#endif
8297 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
8298 }
8299
8300 /* Loading CR0, debug state might have changed intercepts, update VMCS. */
8301 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS))
8302 {
8303 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC));
8304 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
8305 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8306 AssertRC(rc);
8307 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS);
8308 }
8309
8310 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE),
8311 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8312}
8313
8314
8315/**
8316 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8317 *
8318 * @returns Strict VBox status code (i.e. informational status codes too).
8319 * @param pVM The cross context VM structure.
8320 * @param pVCpu The cross context virtual CPU structure.
8321 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8322 * out-of-sync. Make sure to update the required fields
8323 * before using them.
8324 */
8325static VBOXSTRICTRC hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
8326{
8327 HMVMX_ASSERT_PREEMPT_SAFE();
8328
8329 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8330#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8331 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
8332#endif
8333
8334 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
8335 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP))
8336 {
8337 rcStrict = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
8338 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8339 { /* likely */}
8340 else
8341 {
8342 AssertLogRelMsgFailedReturn(("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestRip failed! rc=%Rrc\n",
8343 VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
8344 }
8345 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
8346 }
8347 else if (HMCPU_CF_VALUE(pVCpu))
8348 {
8349 rcStrict = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
8350 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8351 { /* likely */}
8352 else
8353 {
8354 AssertLogRelMsgFailedReturn(("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestState failed! rc=%Rrc\n",
8355 VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
8356 }
8357 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
8358 }
8359
8360 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8361 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST)
8362 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
8363 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8364 return rcStrict;
8365}
8366
8367
8368/**
8369 * Does the preparations before executing guest code in VT-x.
8370 *
8371 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8372 * recompiler/IEM. We must be cautious about committing guest-state information
8373 * into the VMCS on the assumption that we will definitely go on to execute the
8374 * guest in VT-x mode.
8375 *
8376 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8377 * the common-state (TRPM/forceflags), we must undo those changes so that the
8378 * recompiler/IEM can (and should) use them when it resumes guest execution.
8379 * Otherwise such operations must be done when we can no longer exit to ring-3.
8380 *
8381 * @returns Strict VBox status code (i.e. informational status codes too).
8382 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8383 * have been disabled.
8384 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8385 * double-fault into the guest.
8386 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8387 * dispatched directly.
8388 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8389 *
8390 * @param pVM The cross context VM structure.
8391 * @param pVCpu The cross context virtual CPU structure.
8392 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8393 * out-of-sync. Make sure to update the required fields
8394 * before using them.
8395 * @param pVmxTransient Pointer to the VMX transient structure.
8396 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8397 * us ignore some of the reasons for returning to
8398 * ring-3, and return VINF_EM_DBG_STEPPED if event
8399 * dispatching took place.
8400 */
8401static VBOXSTRICTRC hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8402{
8403 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8404
8405#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8406 PGMRZDynMapFlushAutoSet(pVCpu);
8407#endif
8408
8409 /* Check force flag actions that might require us to go back to ring-3. */
8410 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx, fStepping);
8411 if (rcStrict == VINF_SUCCESS)
8412 { /* FFs don't get set all the time. */ }
8413 else
8414 return rcStrict;
8415
8416#ifndef IEM_VERIFICATION_MODE_FULL
8417 /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
8418 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
8419 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
8420 {
8421 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8422 RTGCPHYS GCPhysApicBase;
8423 GCPhysApicBase = pMixedCtx->msrApicBase;
8424 GCPhysApicBase &= PAGE_BASE_GC_MASK;
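    /* The low 12 bits of IA32_APIC_BASE hold flags (BSP, x2APIC enable, APIC global enable) and reserved
       bits; masking with PAGE_BASE_GC_MASK leaves the page-aligned physical base of the APIC. */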
8425
8426 /* Unalias any existing mapping. */
8427 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8428 AssertRCReturn(rc, rc);
8429
8430 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
8431 Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
8432 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8433 AssertRCReturn(rc, rc);
8434
8435 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
8436 }
8437#endif /* !IEM_VERIFICATION_MODE_FULL */
8438
8439 if (TRPMHasTrap(pVCpu))
8440 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8441 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8442
8443 /*
8444 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus needs to be done with
8445 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM.
8446 */
8447 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fStepping);
8448 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8449 { /* likely */ }
8450 else
8451 {
8452 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8453 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8454 return rcStrict;
8455 }
8456
8457 /*
8458 * Load the guest state bits, we can handle longjmps/getting preempted here.
8459 *
8460 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8461 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8462 * Hence, this needs to be done -after- injection of events.
8463 */
8464 rcStrict = hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
8465 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8466 { /* likely */ }
8467 else
8468 return rcStrict;
8469
8470 /*
8471 * No longjmps to ring-3 from this point on!!!
8472 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8473 * This also disables flushing of the R0-logger instance (if any).
8474 */
8475 VMMRZCallRing3Disable(pVCpu);
8476
8477 /*
8478 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
8479 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
8480 *
8481 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
8482 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
8483 *
8484 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
8485 * executing guest code.
8486 */
8487 pVmxTransient->fEFlags = ASMIntDisableFlags();
8488
8489 if ( ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8490 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8491 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
8492 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8493 {
8494 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
8495 {
8496 /* We've injected any pending events. This is really the point of no return (to ring-3). */
8497 pVCpu->hm.s.Event.fPending = false;
8498
8499 return VINF_SUCCESS;
8500 }
8501
8502 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8503 rcStrict = VINF_EM_RAW_INTERRUPT;
8504 }
8505 else
8506 {
8507 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8508 rcStrict = VINF_EM_RAW_TO_R3;
8509 }
8510
8511 ASMSetFlags(pVmxTransient->fEFlags);
8512 VMMRZCallRing3Enable(pVCpu);
8513
8514 return rcStrict;
8515}
8516
8517
8518/**
8519 * Prepares to run guest code in VT-x and we've committed to doing so. This
8520 * means there is no backing out to ring-3 or anywhere else at this
8521 * point.
8522 *
8523 * @param pVM The cross context VM structure.
8524 * @param pVCpu The cross context virtual CPU structure.
8525 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8526 * out-of-sync. Make sure to update the required fields
8527 * before using them.
8528 * @param pVmxTransient Pointer to the VMX transient structure.
8529 *
8530 * @remarks Called with preemption disabled.
8531 * @remarks No-long-jump zone!!!
8532 */
8533static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8534{
8535 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8536 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8537 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8538
8539 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8540 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
8541
8542#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8543 if (!CPUMIsGuestFPUStateActive(pVCpu))
8544 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8545 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8546#endif
8547
8548 if ( pVCpu->hm.s.fPreloadGuestFpu
8549 && !CPUMIsGuestFPUStateActive(pVCpu))
8550 {
8551 CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
8552 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));
8553 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8554 }
8555
8556 /*
8557 * Lazy-update of the host MSRs values in the auto-load/store MSR area.
8558 */
8559 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8560 && pVCpu->hm.s.vmx.cMsrs > 0)
8561 {
8562 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8563 }
8564
8565 /*
8566 * Load the host state bits as we may've been preempted (only happens when
8567 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
8568 */
8569 /** @todo Why should hmR0VmxSetupVMRunHandler() changing pfnStartVM have
8570 * any effect on the host state that needs to be saved? */
8571 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
8572 {
8573 /* This ASSUMES that pfnStartVM has been set up already. */
8574 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
8575 AssertRC(rc);
8576 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptSaveHostState);
8577 }
8578 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT));
8579
8580 /*
8581 * Load the state shared between host and guest (FPU, debug, lazy MSRs).
8582 */
8583 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))
8584 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
8585 AssertMsg(!HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
8586
8587 /* Store status of the shared guest-host state at the time of VM-entry. */
8588#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
8589 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8590 {
8591 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8592 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8593 }
8594 else
8595#endif
8596 {
8597 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8598 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8599 }
8600 pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
8601
8602 /*
8603 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8604 */
8605 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8606 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
8607
8608 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
8609 RTCPUID idCurrentCpu = pCpu->idCpu;
8610 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8611 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8612 {
8613 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
8614 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8615 }
8616
8617 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
8618 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8619 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8620 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8621
8622 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8623
8624 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8625 to start executing. */
8626
8627 /*
8628 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8629 */
8630 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8631 {
8632 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8633 {
8634 bool fMsrUpdated;
8635 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
8636 AssertRC(rc2);
8637 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
8638
8639 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
8640 &fMsrUpdated);
8641 AssertRC(rc2);
8642 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8643
8644 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8645 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8646 }
8647 else
8648 {
8649 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8650 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8651 }
8652 }
8653
8654#ifdef VBOX_STRICT
8655 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8656 hmR0VmxCheckHostEferMsr(pVCpu);
8657 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8658#endif
8659#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8660 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
8661 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8662 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8663#endif
8664}
8665
8666
8667/**
8668 * Performs some essential restoration of state after running guest code in
8669 * VT-x.
8670 *
8671 * @param pVM The cross context VM structure.
8672 * @param pVCpu The cross context virtual CPU structure.
8673 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
8674 * out-of-sync. Make sure to update the required fields
8675 * before using them.
8676 * @param pVmxTransient Pointer to the VMX transient structure.
8677 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8678 *
8679 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8680 *
8681 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8682 * unconditionally when it is safe to do so.
8683 */
8684static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8685{
8686 NOREF(pVM);
8687
8688 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8689
8690 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
8691 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
8692 HMVMXCPU_GST_RESET_TO(pVCpu, 0); /* Exits/longjmps to ring-3 requires saving the guest state. */
8693 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8694 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8695 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8696
8697 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8698 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);
8699
8700 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
8701 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8702 Assert(!ASMIntAreEnabled());
8703 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8704
8705#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
8706 if (CPUMIsGuestFPUStateActive(pVCpu))
8707 {
8708 hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8709 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
8710 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
8711 }
8712#endif
8713
8714#if HC_ARCH_BITS == 64
8715 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8716#endif
8717 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8718#ifdef VBOX_STRICT
8719 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8720#endif
8721 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
8722 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
8723
8724 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8725 uint32_t uExitReason;
8726 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8727 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8728 AssertRC(rc);
8729 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
8730 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
8731
8732 /* Update the VM-exit history array. */
8733 HMCPU_EXIT_HISTORY_ADD(pVCpu, pVmxTransient->uExitReason);
8734
8735 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
8736 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
8737 {
8738 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
8739 pVmxTransient->fVMEntryFailed));
8740 return;
8741 }
8742
8743 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
8744 {
8745 /** @todo We can optimize this by only syncing with our force-flags when
8746 * really needed and keeping the VMCS state as it is for most
8747 * VM-exits. */
8748 /* Update the guest interruptibility-state from the VMCS. */
8749 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
8750
8751#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8752 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
8753 AssertRC(rc);
8754#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8755 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8756 AssertRC(rc);
8757#endif
8758
8759 /*
8760 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
8761         * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
8762 * why it's done here as it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
8763 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
8764 */
8765 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8766 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
8767 {
8768 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
8769 AssertRC(rc);
8770 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
8771 }
8772 }
8773}
8774
8775
8776/**
8777 * Runs the guest code using VT-x the normal way.
8778 *
8779 * @returns VBox status code.
8780 * @param pVM The cross context VM structure.
8781 * @param pVCpu The cross context virtual CPU structure.
8782 * @param pCtx Pointer to the guest-CPU context.
8783 *
8784 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8785 */
8786static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
8787{
8788 VMXTRANSIENT VmxTransient;
8789 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8790 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8791 uint32_t cLoops = 0;
8792
8793 for (;; cLoops++)
8794 {
8795 Assert(!HMR0SuspendPending());
8796 HMVMX_ASSERT_CPU_SAFE();
8797
8798        /* Preparatory work for running guest code; this may force us to return
8799 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8800 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8801 rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8802 if (rcStrict != VINF_SUCCESS)
8803 break;
8804
8805 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
8806 int rcRun = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
8807 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8808
8809 /* Restore any residual host-state and save any bits shared between host
8810 and guest into the guest-CPU state. Re-enables interrupts! */
8811 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
8812
8813 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8814 if (RT_SUCCESS(rcRun))
8815 { /* very likely */ }
8816 else
8817 {
8818 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
8819 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rcRun, pCtx, &VmxTransient);
8820 return rcRun;
8821 }
8822
8823 /* Profile the VM-exit. */
8824 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8825 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8826 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8827 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
8828 HMVMX_START_EXIT_DISPATCH_PROF();
8829
8830 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8831
8832 /* Handle the VM-exit. */
8833#ifdef HMVMX_USE_FUNCTION_TABLE
8834 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8835#else
8836 rcStrict = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8837#endif
8838 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
8839 if (rcStrict == VINF_SUCCESS)
8840 {
8841 if (cLoops <= pVM->hm.s.cMaxResumeLoops)
8842 continue; /* likely */
8843 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
8844 rcStrict = VINF_EM_RAW_INTERRUPT;
8845 }
8846 break;
8847 }
8848
8849 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8850 return rcStrict;
8851}
8852
8853
8854
8855/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
8856 * probes.
8857 *
8858 * The following few functions and associated structure contain the bloat
8859 * necessary for providing detailed debug events and dtrace probes as well as
8860 * reliable host side single stepping. This works on the principle of
8861 * "subclassing" the normal execution loop and workers. We replace the loop
8862 * method completely and override selected helpers to add necessary adjustments
8863 * to their core operation.
8864 *
8865 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
8866 * any performance for debug and analysis features.
8867 *
8868 * @{
8869 */
8870
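/*
 * Rough call-order sketch for the helpers in this group, assumed from their
 * individual descriptions and parameter lists (the debug/single-stepping run
 * loop itself is not part of this excerpt, so treat this as informational
 * only, not as the authoritative flow):
 *
 *      VMXRUNDBGSTATE DbgState;
 *      hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);
 *      for (;;)
 *      {
 *          hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState, &VmxTransient);
 *          // ... prepare + commit (hmR0VmxPreRunGuest / hmR0VmxPreRunGuestCommitted) ...
 *          hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState);    // interrupts disabled here
 *          // ... run the guest and do the post-run restoration (hmR0VmxPostRunGuest) ...
 *          rcStrict = hmR0VmxRunDebugHandleExit(pVM, pVCpu, pCtx, &VmxTransient,
 *                                               VmxTransient.uExitReason, &DbgState);
 *          if (rcStrict != VINF_SUCCESS)
 *              break;
 *      }
 *      rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
 */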
8871typedef struct VMXRUNDBGSTATE
8872{
8873 /** The RIP we started executing at. This is for detecting that we stepped. */
8874 uint64_t uRipStart;
8875 /** The CS we started executing with. */
8876 uint16_t uCsStart;
8877
8878 /** Whether we've actually modified the 1st execution control field. */
8879 bool fModifiedProcCtls : 1;
8880 /** Whether we've actually modified the 2nd execution control field. */
8881 bool fModifiedProcCtls2 : 1;
8882 /** Whether we've actually modified the exception bitmap. */
8883 bool fModifiedXcptBitmap : 1;
8884
8885    /** We desire the CR0 mask to be cleared. */
8886 bool fClearCr0Mask : 1;
8887    /** We desire the CR4 mask to be cleared. */
8888 bool fClearCr4Mask : 1;
8889 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
8890 uint32_t fCpe1Extra;
8891 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
8892 uint32_t fCpe1Unwanted;
8893 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
8894 uint32_t fCpe2Extra;
8895    /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
8896 uint32_t bmXcptExtra;
8897 /** The sequence number of the Dtrace provider settings the state was
8898 * configured against. */
8899 uint32_t uDtraceSettingsSeqNo;
8900 /** Exits to check (one bit per exit). */
8901 uint32_t bmExitsToCheck[3];
8902
8903 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
8904 uint32_t fProcCtlsInitial;
8905 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
8906 uint32_t fProcCtls2Initial;
8907 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
8908 uint32_t bmXcptInitial;
8909} VMXRUNDBGSTATE;
8910AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
8911typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
8912
8913
8914/**
8915 * Initializes the VMXRUNDBGSTATE structure.
8916 *
8917 * @param pVCpu The cross context virtual CPU structure of the
8918 * calling EMT.
8919 * @param pCtx The CPU register context to go with @a pVCpu.
8920 * @param pDbgState The structure to initialize.
8921 */
8922DECLINLINE(void) hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
8923{
8924 pDbgState->uRipStart = pCtx->rip;
8925 pDbgState->uCsStart = pCtx->cs.Sel;
8926
8927 pDbgState->fModifiedProcCtls = false;
8928 pDbgState->fModifiedProcCtls2 = false;
8929 pDbgState->fModifiedXcptBitmap = false;
8930 pDbgState->fClearCr0Mask = false;
8931 pDbgState->fClearCr4Mask = false;
8932 pDbgState->fCpe1Extra = 0;
8933 pDbgState->fCpe1Unwanted = 0;
8934 pDbgState->fCpe2Extra = 0;
8935 pDbgState->bmXcptExtra = 0;
8936 pDbgState->fProcCtlsInitial = pVCpu->hm.s.vmx.u32ProcCtls;
8937 pDbgState->fProcCtls2Initial = pVCpu->hm.s.vmx.u32ProcCtls2;
8938 pDbgState->bmXcptInitial = pVCpu->hm.s.vmx.u32XcptBitmap;
8939}
8940
8941
8942/**
8943 * Updates the VMCS fields with changes requested by @a pDbgState.
8944 *
8945 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
8946 * immediately before executing guest code, i.e. when interrupts are disabled.
8947 * We don't check status codes here as we cannot easily assert or return in the
8948 * latter case.
8949 *
8950 * @param pVCpu The cross context virtual CPU structure.
8951 * @param pDbgState The debug state.
8952 */
8953DECLINLINE(void) hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
8954{
8955 /*
8956 * Ensure desired flags in VMCS control fields are set.
8957 * (Ignoring write failure here, as we're committed and it's just debug extras.)
8958 *
8959 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
8960 * there should be no stale data in pCtx at this point.
8961 */
8962 if ( (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
8963 || (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Unwanted))
8964 {
8965 pVCpu->hm.s.vmx.u32ProcCtls |= pDbgState->fCpe1Extra;
8966 pVCpu->hm.s.vmx.u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
8967 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8968        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls));
8969 pDbgState->fModifiedProcCtls = true;
8970 }
8971
8972 if ((pVCpu->hm.s.vmx.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
8973 {
8974 pVCpu->hm.s.vmx.u32ProcCtls2 |= pDbgState->fCpe2Extra;
8975 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.u32ProcCtls2);
8976        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls2));
8977 pDbgState->fModifiedProcCtls2 = true;
8978 }
8979
8980 if ((pVCpu->hm.s.vmx.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
8981 {
8982 pVCpu->hm.s.vmx.u32XcptBitmap |= pDbgState->bmXcptExtra;
8983 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8984        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVCpu->hm.s.vmx.u32XcptBitmap));
8985 pDbgState->fModifiedXcptBitmap = true;
8986 }
8987
8988 if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.u32CR0Mask != 0)
8989 {
8990 pVCpu->hm.s.vmx.u32CR0Mask = 0;
8991 VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
8992        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS_CTRL_CR0_MASK: 0\n"));
8993 }
8994
8995 if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.u32CR4Mask != 0)
8996 {
8997 pVCpu->hm.s.vmx.u32CR4Mask = 0;
8998 VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
8999        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS_CTRL_CR4_MASK: 0\n"));
9000 }
9001}
9002
9003
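/**
 * Restores the VMCS control fields that hmR0VmxPreRunGuestDebugStateApply
 * modified for the debug state, flagging whatever needs recalculating the next
 * time around.
 *
 * @returns @a rcStrict, or the VMCS write status should restoring one of the
 *          execution control fields fail.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDbgState   The debug state.
 * @param   rcStrict    The status code to pass along (from guest execution and
 *                      VM-exit handling).
 */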
9004DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
9005{
9006 /*
9007 * Restore exit control settings as we may not reenter this function the
9008 * next time around.
9009 */
9010    /* We reload the initial value and trigger what recalculations we can the
9011 next time around. From the looks of things, that's all that's required atm. */
9012 if (pDbgState->fModifiedProcCtls)
9013 {
9014 if (!(pDbgState->fProcCtlsInitial & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
9015 pDbgState->fProcCtlsInitial |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
9016 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
9017 AssertRCReturn(rc2, rc2);
9018 pVCpu->hm.s.vmx.u32ProcCtls = pDbgState->fProcCtlsInitial;
9019 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_DEBUG);
9020 }
9021
9022 /* We're currently the only ones messing with this one, so just restore the
9023 cached value and reload the field. */
9024 if ( pDbgState->fModifiedProcCtls2
9025 && pVCpu->hm.s.vmx.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
9026 {
9027 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
9028 AssertRCReturn(rc2, rc2);
9029 pVCpu->hm.s.vmx.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
9030 }
9031
9032 /* If we've modified the exception bitmap, we restore it and trigger
9033 reloading and partial recalculation the next time around. */
9034 if (pDbgState->fModifiedXcptBitmap)
9035 {
9036 pVCpu->hm.s.vmx.u32XcptBitmap = pDbgState->bmXcptInitial;
9037 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS | HM_CHANGED_GUEST_CR0);
9038 }
9039
9040 /* We assume hmR0VmxLoadSharedCR0 will recalculate and load the CR0 mask. */
9041 if (pDbgState->fClearCr0Mask)
9042 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
9043
9044 /* We assume hmR0VmxLoadGuestCR3AndCR4 will recalculate and load the CR4 mask. */
9045 if (pDbgState->fClearCr4Mask)
9046 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
9047
9048 return rcStrict;
9049}
9050
9051
9052/**
9053 * Configures VM-exit controls for current DBGF and DTrace settings.
9054 *
9055 * This updates @a pDbgState and the VMCS execution control fields to reflect
9056 * the necessary exits demanded by DBGF and DTrace.
9057 *
9058 * @param pVM The cross context VM structure.
9059 * @param pVCpu The cross context virtual CPU structure.
9060 * @param pCtx Pointer to the guest-CPU context.
9061 * @param pDbgState The debug state.
9062 * @param pVmxTransient Pointer to the VMX transient structure. May update
9063 * fUpdateTscOffsettingAndPreemptTimer.
9064 */
9065static void hmR0VmxPreRunGuestDebugStateUpdate(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx,
9066 PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient)
9067{
9068 /*
9069 * Take down the dtrace serial number so we can spot changes.
9070 */
9071 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
9072 ASMCompilerBarrier();
9073
9074 /*
9075 * We'll rebuild most of the middle block of data members (holding the
9076 * current settings) as we go along here, so start by clearing it all.
9077 */
9078 pDbgState->bmXcptExtra = 0;
9079 pDbgState->fCpe1Extra = 0;
9080 pDbgState->fCpe1Unwanted = 0;
9081 pDbgState->fCpe2Extra = 0;
9082 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
9083 pDbgState->bmExitsToCheck[i] = 0;
9084
9085 /*
9086 * Software interrupts (INT XXh) - no idea how to trigger these...
9087 */
9088 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
9089 || VBOXVMM_INT_SOFTWARE_ENABLED())
9090 {
9091 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9092 }
9093
9094 /*
9095 * Exception bitmap and XCPT events+probes.
9096 */
9097 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
9098 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
9099 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
9100
9101 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
9102 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
9103 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
9104 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
9105 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
9106 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
9107 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
9108 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
9109 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
9110 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
9111 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
9112 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
9113 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
9114 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
9115 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
9116 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
9117 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
9118 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
9119
9120 if (pDbgState->bmXcptExtra)
9121 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9122
9123 /*
9124 * Process events and probes for VM exits, making sure we get the wanted exits.
9125 *
9126     * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
9127 * So, when adding/changing/removing please don't forget to update it.
9128 *
9129     * Some of the macros are picking up local variables to save horizontal space
9130 * (being able to see it in a table is the lesser evil here).
9131 */
9132#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
9133 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
9134 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
9135#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
9136 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9137 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9138 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9139 } else do { } while (0)
9140#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
9141 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9142 { \
9143 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
9144 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9145 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9146 } else do { } while (0)
9147#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
9148 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9149 { \
9150 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
9151 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9152 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9153 } else do { } while (0)
9154#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
9155 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9156 { \
9157 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
9158 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9159 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9160 } else do { } while (0)
9161
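    /*
     * For reference, a rough expansion of the table entries below (sketch only,
     * derived from the macros above): SET_ONLY_XBM_IF_EITHER_EN(EXIT_CPUID,
     * VMX_EXIT_CPUID) boils down to
     *
     *      if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_EXIT_CPUID)
     *          || VBOXVMM_EXIT_CPUID_ENABLED())
     *          ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_CPUID);
     *
     * (plus a compile-time range check), i.e. the exit is merely marked for
     * checking, while the SET_CPE1/SET_CPEU/SET_CPE2 variants additionally
     * accumulate the execution-control bits needed to actually get that exit.
     */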
9162 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
9163 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
9164 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
9165 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
9166 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
9167
9168 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
9169 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
9170 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
9171 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
9172 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT); /* paranoia */
9173 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
9174 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
9175 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
9176 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
9177 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
9178 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT);
9179 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
9180 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
9181 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
9182 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
9183 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
9184 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
9185 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
9186 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
9187 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
9188 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
9189 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
9190 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
9191 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
9192 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
9193 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
9194 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
9195 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
9196 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
9197 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
9198 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
9199 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
9200 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
9201 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
9202 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
9203 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
9204
9205 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
9206 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9207 {
9208 int rc2 = hmR0VmxSaveGuestCR0(pVCpu, pCtx);
9209 rc2 |= hmR0VmxSaveGuestCR4(pVCpu, pCtx);
9210 rc2 |= hmR0VmxSaveGuestApicState(pVCpu, pCtx);
9211 AssertRC(rc2);
9212
9213#if 0 /** @todo fix me */
9214 pDbgState->fClearCr0Mask = true;
9215 pDbgState->fClearCr4Mask = true;
9216#endif
9217 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
9218 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT;
9219 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9220 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;
9221 pDbgState->fCpe1Unwanted |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* risky? */
9222 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
9223 require clearing here and in the loop if we start using it. */
9224 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
9225 }
9226 else
9227 {
9228 if (pDbgState->fClearCr0Mask)
9229 {
9230 pDbgState->fClearCr0Mask = false;
9231 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
9232 }
9233 if (pDbgState->fClearCr4Mask)
9234 {
9235 pDbgState->fClearCr4Mask = false;
9236 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
9237 }
9238 }
9239 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
9240 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
9241
9242 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
9243 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
9244 {
9245 /** @todo later, need to fix handler as it assumes this won't usually happen. */
9246 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
9247 }
9248 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
9249 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
9250
9251 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS); /* risky clearing this? */
9252 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
9253 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
9254 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
9255 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT); /* paranoia */
9256 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
9257 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT); /* paranoia */
9258 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
9259#if 0 /** @todo too slow, fix handler. */
9260 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
9261#endif
9262 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
9263
9264 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
9265 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
9266 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
9267 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
9268 {
9269 pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
9270 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XDTR_ACCESS);
9271 }
9272 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_XDTR_ACCESS);
9273 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_XDTR_ACCESS);
9274 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_XDTR_ACCESS);
9275 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_XDTR_ACCESS);
9276
9277 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
9278 || IS_EITHER_ENABLED(pVM, INSTR_STR)
9279 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
9280 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
9281 {
9282 pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
9283 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_TR_ACCESS);
9284 }
9285 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_TR_ACCESS);
9286 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_TR_ACCESS);
9287 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_TR_ACCESS);
9288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_TR_ACCESS);
9289
9290 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
9291 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
9292 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
9293 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
9294 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
9295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
9296 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
9297 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
9298 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
9299 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
9300 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
9301 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
9302 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
9303 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
9304 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
9305 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
9306 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT);
9307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
9308 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
9309    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
9310 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
9311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
9312
9313#undef IS_EITHER_ENABLED
9314#undef SET_ONLY_XBM_IF_EITHER_EN
9315#undef SET_CPE1_XBM_IF_EITHER_EN
9316#undef SET_CPEU_XBM_IF_EITHER_EN
9317#undef SET_CPE2_XBM_IF_EITHER_EN
9318
9319 /*
9320 * Sanitize the control stuff.
9321 */
9322 pDbgState->fCpe2Extra &= pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
9323 if (pDbgState->fCpe2Extra)
9324 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
9325 pDbgState->fCpe1Extra &= pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
9326 pDbgState->fCpe1Unwanted &= ~pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
9327 if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
9328 {
9329 pVCpu->hm.s.fDebugWantRdTscExit ^= true;
9330 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9331 }
9332
9333 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
9334 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
9335 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
9336 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
9337}
9338
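/*
 * Illustrative sketch only, never compiled: how a consumer is assumed to
 * consult the bmExitsToCheck bitmap populated above before going through
 * hmR0VmxHandleExitDtraceEvents.  The helper name is hypothetical; the actual
 * check in the debug exit path (not shown in this excerpt) is expected to be
 * equivalent.
 */
#if 0
DECLINLINE(bool) hmR0VmxDbgIsExitOfInterest(PVMXRUNDBGSTATE pDbgState, uint32_t uExitReason)
{
    /* One bit per basic exit reason; see bmExitsToCheck and the AssertCompile
       on its size next to the structure definition. */
    return uExitReason <= VMX_EXIT_MAX
        && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason);
}
#endif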
9339
9340/**
9341 * Fires off DBGF events and dtrace probes for an exit, when it's appropriate.
9342 *
9343 * The caller has checked the exit against the VMXRUNDBGSTATE::bmExitsToCheck
9344 * bitmap. The caller has checked for NMIs already, so we don't have to do that
9345 * either.
9346 *
9347 * @returns Strict VBox status code (i.e. informational status codes too).
9348 * @param pVM The cross context VM structure.
9349 * @param pVCpu The cross context virtual CPU structure.
9350 * @param pMixedCtx Pointer to the guest-CPU context.
9351 * @param pVmxTransient Pointer to the VMX-transient structure.
9352 * @param uExitReason The VM-exit reason.
9353 *
9354 * @remarks The name of this function is displayed by dtrace, so keep it short
9355 * and to the point. No longer than 33 chars long, please.
9356 */
9357static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx,
9358 PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
9359{
9360 /*
9361 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
9362 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
9363 *
9364 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
9365 * does. Must add/change/remove both places. Same ordering, please.
9366 *
9367 * Added/removed events must also be reflected in the next section
9368 * where we dispatch dtrace events.
9369 */
9370 bool fDtrace1 = false;
9371 bool fDtrace2 = false;
9372 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
9373 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
9374 uint32_t uEventArg = 0;
9375#define SET_EXIT(a_EventSubName) \
9376 do { \
9377 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9378 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9379 } while (0)
9380#define SET_BOTH(a_EventSubName) \
9381 do { \
9382 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
9383 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9384 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
9385 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9386 } while (0)
9387 switch (uExitReason)
9388 {
9389 case VMX_EXIT_MTF:
9390 return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
9391
9392 case VMX_EXIT_XCPT_OR_NMI:
9393 {
9394 uint8_t const idxVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
9395 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo))
9396 {
9397 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
9398 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT:
9399 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT:
9400 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
9401 {
9402 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uExitIntInfo))
9403 {
9404 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9405 uEventArg = pVmxTransient->uExitIntErrorCode;
9406 }
9407 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
9408 switch (enmEvent1)
9409 {
9410 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
9411 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
9412 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
9413 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
9414 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
9415 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
9416 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
9417 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
9418 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
9419 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
9420 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
9421 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
9422 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
9423 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
9424 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
9425 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
9426 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
9427 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
9428 default: break;
9429 }
9430 }
9431 else
9432 AssertFailed();
9433 break;
9434
9435 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT:
9436 uEventArg = idxVector;
9437 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
9438 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
9439 break;
9440 }
9441 break;
9442 }
9443
9444 case VMX_EXIT_TRIPLE_FAULT:
9445 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
9446 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
9447 break;
9448 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
9449 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
9450 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
9451 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
9452 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
9453
9454 /* Instruction specific VM-exits: */
9455 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
9456 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
9457 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
9458 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
9459 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
9460 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
9461 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
9462 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
9463 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
9464 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
9465 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
9466 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
9467 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
9468 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
9469 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
9470 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
9471 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
9472 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
9473 case VMX_EXIT_MOV_CRX:
9474 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9475/** @todo r=bird: I feel these macros aren't very descriptive and needs to be at least 30 chars longer! ;-)
9476* Sensible abbreviations strongly recommended here because even with 130 columns this stuff get too wide! */
9477 if ( VMX_EXIT_QUALIFICATION_CRX_ACCESS(pVmxTransient->uExitQualification)
9478 == VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ)
9479 SET_BOTH(CRX_READ);
9480 else
9481 SET_BOTH(CRX_WRITE);
9482 uEventArg = VMX_EXIT_QUALIFICATION_CRX_REGISTER(pVmxTransient->uExitQualification);
9483 break;
9484 case VMX_EXIT_MOV_DRX:
9485 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9486 if ( VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification)
9487 == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_READ)
9488 SET_BOTH(DRX_READ);
9489 else
9490 SET_BOTH(DRX_WRITE);
9491 uEventArg = VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification);
9492 break;
9493 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
9494 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
9495 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
9496 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
9497 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
9498 case VMX_EXIT_XDTR_ACCESS:
9499 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9500 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_XDTR_INSINFO_INSTR_ID))
9501 {
9502 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
9503 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
9504 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
9505 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
9506 }
9507 break;
9508
9509 case VMX_EXIT_TR_ACCESS:
9510 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9511 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_YYTR_INSINFO_INSTR_ID))
9512 {
9513 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
9514 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
9515 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
9516 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
9517 }
9518 break;
9519
9520 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
9521 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
9522 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
9523 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
9524 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
9525 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
9526 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
9527 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
9528 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
9529 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
9530 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
9531
9532 /* Events that aren't relevant at this point. */
9533 case VMX_EXIT_EXT_INT:
9534 case VMX_EXIT_INT_WINDOW:
9535 case VMX_EXIT_NMI_WINDOW:
9536 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9537 case VMX_EXIT_PREEMPT_TIMER:
9538 case VMX_EXIT_IO_INSTR:
9539 break;
9540
9541 /* Errors and unexpected events. */
9542 case VMX_EXIT_INIT_SIGNAL:
9543 case VMX_EXIT_SIPI:
9544 case VMX_EXIT_IO_SMI:
9545 case VMX_EXIT_SMI:
9546 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9547 case VMX_EXIT_ERR_MSR_LOAD:
9548 case VMX_EXIT_ERR_MACHINE_CHECK:
9549 break;
9550
9551 default:
9552 AssertMsgFailed(("Unexpected exit=%#x\n", uExitReason));
9553 break;
9554 }
9555#undef SET_BOTH
9556#undef SET_EXIT
9557
9558 /*
9559 * Dtrace tracepoints go first. We do them here at once so we don't
9560 * have to copy the guest state saving and stuff a few dozen times.
9561     * The downside is that we've got to repeat the switch, though this time
9562 * we use enmEvent since the probes are a subset of what DBGF does.
9563 */
9564 if (fDtrace1 || fDtrace2)
9565 {
9566 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9567 hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9568 switch (enmEvent1)
9569 {
9570 /** @todo consider which extra parameters would be helpful for each probe. */
9571 case DBGFEVENT_END: break;
9572 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pMixedCtx); break;
9573 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pMixedCtx, pMixedCtx->dr[6]); break;
9574 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pMixedCtx); break;
9575 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pMixedCtx); break;
9576 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pMixedCtx); break;
9577 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pMixedCtx); break;
9578 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pMixedCtx); break;
9579 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pMixedCtx); break;
9580 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pMixedCtx, uEventArg); break;
9581 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pMixedCtx, uEventArg); break;
9582 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pMixedCtx, uEventArg); break;
9583 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pMixedCtx, uEventArg); break;
9584 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pMixedCtx, uEventArg, pMixedCtx->cr2); break;
9585 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pMixedCtx); break;
9586 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pMixedCtx); break;
9587 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pMixedCtx); break;
9588 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pMixedCtx); break;
9589 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pMixedCtx, uEventArg); break;
9590 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9591 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
9592 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pMixedCtx); break;
9593 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pMixedCtx); break;
9594 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pMixedCtx); break;
9595 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pMixedCtx); break;
9596 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pMixedCtx); break;
9597 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pMixedCtx); break;
9598 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pMixedCtx); break;
9599 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9600 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9601 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9602 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9603 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
9604 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
9605 RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
9606 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pMixedCtx); break;
9607 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pMixedCtx); break;
9608 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pMixedCtx); break;
9609 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pMixedCtx); break;
9610 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pMixedCtx); break;
9611 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pMixedCtx); break;
9612 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pMixedCtx); break;
9613 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pMixedCtx); break;
9614 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pMixedCtx); break;
9615 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pMixedCtx); break;
9616 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pMixedCtx); break;
9617 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pMixedCtx); break;
9618 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pMixedCtx); break;
9619 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pMixedCtx); break;
9620 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pMixedCtx); break;
9621 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pMixedCtx); break;
9622 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pMixedCtx); break;
9623 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pMixedCtx); break;
9624 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pMixedCtx); break;
9625 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
9626 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
9627 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
9628 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pMixedCtx); break;
9629 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pMixedCtx); break;
9630 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pMixedCtx); break;
9631 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pMixedCtx); break;
9632 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pMixedCtx); break;
9633 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pMixedCtx); break;
9634 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pMixedCtx); break;
9635 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pMixedCtx); break;
9636 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pMixedCtx); break;
9637 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pMixedCtx); break;
9638 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
9639 }
9640 switch (enmEvent2)
9641 {
9642 /** @todo consider which extra parameters would be helpful for each probe. */
9643 case DBGFEVENT_END: break;
9644 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pMixedCtx); break;
9645 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
9646 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pMixedCtx); break;
9647 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pMixedCtx); break;
9648 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pMixedCtx); break;
9649 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pMixedCtx); break;
9650 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pMixedCtx); break;
9651 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pMixedCtx); break;
9652 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pMixedCtx); break;
9653 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9654 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9655 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9656 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9657 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
9658 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
9659 RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
9660 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pMixedCtx); break;
9661 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pMixedCtx); break;
9662 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pMixedCtx); break;
9663 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pMixedCtx); break;
9664 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pMixedCtx); break;
9665 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pMixedCtx); break;
9666 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pMixedCtx); break;
9667 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pMixedCtx); break;
9668 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pMixedCtx); break;
9669 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pMixedCtx); break;
9670 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pMixedCtx); break;
9671 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pMixedCtx); break;
9672 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pMixedCtx); break;
9673 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pMixedCtx); break;
9674 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pMixedCtx); break;
9675 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pMixedCtx); break;
9676 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pMixedCtx); break;
9677 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pMixedCtx); break;
9678 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pMixedCtx); break;
9679 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
9680 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
9681 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
9682 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pMixedCtx); break;
9683 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pMixedCtx); break;
9684 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pMixedCtx); break;
9685 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pMixedCtx); break;
9686 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pMixedCtx); break;
9687 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pMixedCtx); break;
9688 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pMixedCtx); break;
9689 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pMixedCtx); break;
9690 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pMixedCtx); break;
9691 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pMixedCtx); break;
9692 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pMixedCtx); break;
9693 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pMixedCtx); break;
9694 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pMixedCtx); break;
9695 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pMixedCtx); break;
9696 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
9697 }
9698 }
9699
9700 /*
9701     * Fire off the DBGF event, if enabled (our check here is just a quick one,
9702 * the DBGF call will do a full check).
9703 *
9704 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
9705     * Note! If we have two events, we prioritize the first, i.e. the instruction
9706 * one, in order to avoid event nesting.
9707 */
9708 if ( enmEvent1 != DBGFEVENT_END
9709 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
9710 {
9711 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArg(pVM, pVCpu, enmEvent1, uEventArg, DBGFEVENTCTX_HM);
9712 if (rcStrict != VINF_SUCCESS)
9713 return rcStrict;
9714 }
9715 else if ( enmEvent2 != DBGFEVENT_END
9716 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
9717 {
9718 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArg(pVM, pVCpu, enmEvent2, uEventArg, DBGFEVENTCTX_HM);
9719 if (rcStrict != VINF_SUCCESS)
9720 return rcStrict;
9721 }
9722
9723 return VINF_SUCCESS;
9724}
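
/*
 * Illustrative sketch (not part of the build): the two-event prioritization
 * used above, reduced to its essentials.  Assumes the DBGFEVENTTYPE enum and
 * the DBGFEventGenericWithArg() signature used elsewhere in this function;
 * the helper name and parameter names are hypothetical.
 */
#if 0
static VBOXSTRICTRC hmR0VmxFireDbgfEventsSketch(PVM pVM, PVMCPU pVCpu, DBGFEVENTTYPE enmInstrEvent,
                                                DBGFEVENTTYPE enmIntrEvent, uint64_t uEventArg)
{
    /* The instruction event takes precedence over the interrupt/event one to avoid nesting. */
    if (   enmInstrEvent != DBGFEVENT_END
        && DBGF_IS_EVENT_ENABLED(pVM, enmInstrEvent))
        return DBGFEventGenericWithArg(pVM, pVCpu, enmInstrEvent, uEventArg, DBGFEVENTCTX_HM);
    if (   enmIntrEvent != DBGFEVENT_END
        && DBGF_IS_EVENT_ENABLED(pVM, enmIntrEvent))
        return DBGFEventGenericWithArg(pVM, pVCpu, enmIntrEvent, uEventArg, DBGFEVENTCTX_HM);
    return VINF_SUCCESS;
}
#endif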
9725
9726
9727/**
9728 * Single-stepping VM-exit filtering.
9729 *
9730 * This is preprocessing the exits and deciding whether we've gotten far enough
9731 * to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit handling is
9732 * performed.
9733 *
9734 * @returns Strict VBox status code (i.e. informational status codes too).
9735 * @param pVM The cross context VM structure.
9736 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9737 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9738 * out-of-sync. Make sure to update the required
9739 * fields before using them.
9740 * @param pVmxTransient Pointer to the VMX-transient structure.
9741 * @param uExitReason The VM-exit reason.
9742 * @param pDbgState The debug state.
9743 */
9744DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9745 uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState)
9746{
9747 /*
9748 * Expensive (saves context) generic dtrace exit probe.
9749 */
9750 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
9751 { /* more likely */ }
9752 else
9753 {
9754 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9755 hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9756 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
9757 }
9758
9759 /*
9760 * Check for host NMI, just to get that out of the way.
9761 */
9762 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
9763 { /* normally likely */ }
9764 else
9765 {
9766 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9767 AssertRCReturn(rc2, rc2);
9768 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9769 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9770 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9771 }
9772
9773 /*
9774 * Check for single stepping event if we're stepping.
9775 */
9776 if (pVCpu->hm.s.fSingleInstruction)
9777 {
9778 switch (uExitReason)
9779 {
9780 case VMX_EXIT_MTF:
9781 return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
9782
9783 /* Various events: */
9784 case VMX_EXIT_XCPT_OR_NMI:
9785 case VMX_EXIT_EXT_INT:
9786 case VMX_EXIT_TRIPLE_FAULT:
9787 case VMX_EXIT_INT_WINDOW:
9788 case VMX_EXIT_NMI_WINDOW:
9789 case VMX_EXIT_TASK_SWITCH:
9790 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9791 case VMX_EXIT_APIC_ACCESS:
9792 case VMX_EXIT_EPT_VIOLATION:
9793 case VMX_EXIT_EPT_MISCONFIG:
9794 case VMX_EXIT_PREEMPT_TIMER:
9795
9796 /* Instruction specific VM-exits: */
9797 case VMX_EXIT_CPUID:
9798 case VMX_EXIT_GETSEC:
9799 case VMX_EXIT_HLT:
9800 case VMX_EXIT_INVD:
9801 case VMX_EXIT_INVLPG:
9802 case VMX_EXIT_RDPMC:
9803 case VMX_EXIT_RDTSC:
9804 case VMX_EXIT_RSM:
9805 case VMX_EXIT_VMCALL:
9806 case VMX_EXIT_VMCLEAR:
9807 case VMX_EXIT_VMLAUNCH:
9808 case VMX_EXIT_VMPTRLD:
9809 case VMX_EXIT_VMPTRST:
9810 case VMX_EXIT_VMREAD:
9811 case VMX_EXIT_VMRESUME:
9812 case VMX_EXIT_VMWRITE:
9813 case VMX_EXIT_VMXOFF:
9814 case VMX_EXIT_VMXON:
9815 case VMX_EXIT_MOV_CRX:
9816 case VMX_EXIT_MOV_DRX:
9817 case VMX_EXIT_IO_INSTR:
9818 case VMX_EXIT_RDMSR:
9819 case VMX_EXIT_WRMSR:
9820 case VMX_EXIT_MWAIT:
9821 case VMX_EXIT_MONITOR:
9822 case VMX_EXIT_PAUSE:
9823 case VMX_EXIT_XDTR_ACCESS:
9824 case VMX_EXIT_TR_ACCESS:
9825 case VMX_EXIT_INVEPT:
9826 case VMX_EXIT_RDTSCP:
9827 case VMX_EXIT_INVVPID:
9828 case VMX_EXIT_WBINVD:
9829 case VMX_EXIT_XSETBV:
9830 case VMX_EXIT_RDRAND:
9831 case VMX_EXIT_INVPCID:
9832 case VMX_EXIT_VMFUNC:
9833 case VMX_EXIT_RDSEED:
9834 case VMX_EXIT_XSAVES:
9835 case VMX_EXIT_XRSTORS:
9836 {
9837 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9838 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9839 AssertRCReturn(rc2, rc2);
9840 if ( pMixedCtx->rip != pDbgState->uRipStart
9841 || pMixedCtx->cs.Sel != pDbgState->uCsStart)
9842 return VINF_EM_DBG_STEPPED;
9843 break;
9844 }
9845
9846 /* Errors and unexpected events: */
9847 case VMX_EXIT_INIT_SIGNAL:
9848 case VMX_EXIT_SIPI:
9849 case VMX_EXIT_IO_SMI:
9850 case VMX_EXIT_SMI:
9851 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9852 case VMX_EXIT_ERR_MSR_LOAD:
9853 case VMX_EXIT_ERR_MACHINE_CHECK:
9854            case VMX_EXIT_APIC_WRITE:        /* Some talk about this being fault-like, so I guess we must process it? */
9855 break;
9856
9857 default:
9858 AssertMsgFailed(("Unexpected exit=%#x\n", uExitReason));
9859 break;
9860 }
9861 }
9862
9863 /*
9864 * Check for debugger event breakpoints and dtrace probes.
9865 */
9866 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
9867 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
9868 {
9869 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVM, pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9870 if (rcStrict != VINF_SUCCESS)
9871 return rcStrict;
9872 }
9873
9874 /*
9875 * Normal processing.
9876 */
9877#ifdef HMVMX_USE_FUNCTION_TABLE
9878 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9879#else
9880 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9881#endif
9882}
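
/*
 * Illustrative sketch (not part of the build): how an exit-reason bitmap such
 * as VMXRUNDBGSTATE::bmExitsToCheck is typically populated and queried, using
 * the IPRT ASMBitSet/ASMBitTest helpers (bit index into an array of 32-bit
 * words).  The local names are hypothetical.
 */
#if 0
static void hmR0VmxExitBitmapSketch(void)
{
    uint32_t bmExitsToCheck[8] = { 0 };              /* Room for 256 exit reasons, 32 bits per word. */
    ASMBitSet(bmExitsToCheck, VMX_EXIT_CPUID);       /* Mark CPUID exits as interesting. */
    ASMBitSet(bmExitsToCheck, VMX_EXIT_RDMSR);       /* Mark RDMSR exits as interesting. */

    uint32_t const uExitReason = VMX_EXIT_CPUID;
    if (   uExitReason < RT_ELEMENTS(bmExitsToCheck) * 32U
        && ASMBitTest(bmExitsToCheck, uExitReason))
    {
        /* Such an exit would be routed to hmR0VmxHandleExitDtraceEvents(). */
    }
}
#endif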
9883
9884
9885/**
9886 * Single steps guest code using VT-x.
9887 *
9888 * @returns Strict VBox status code (i.e. informational status codes too).
9889 * @param pVM The cross context VM structure.
9890 * @param pVCpu The cross context virtual CPU structure.
9891 * @param pCtx Pointer to the guest-CPU context.
9892 *
9893 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
9894 */
9895static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
9896{
9897 VMXTRANSIENT VmxTransient;
9898 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
9899
9900 /* Set HMCPU indicators. */
9901 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
9902 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
9903 pVCpu->hm.s.fDebugWantRdTscExit = false;
9904 pVCpu->hm.s.fUsingDebugLoop = true;
9905
9906 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
9907 VMXRUNDBGSTATE DbgState;
9908 hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);
9909 hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState, &VmxTransient);
9910
9911 /*
9912 * The loop.
9913 */
9914 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
9915 for (uint32_t cLoops = 0; ; cLoops++)
9916 {
9917 Assert(!HMR0SuspendPending());
9918 HMVMX_ASSERT_CPU_SAFE();
9919 bool fStepping = pVCpu->hm.s.fSingleInstruction;
9920
9921 /*
9922         * Preparatory work for running guest code; this may force us to return
9923 * to ring-3. This bugger disables interrupts on VINF_SUCCESS!
9924 */
9925 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
9926        hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState);    /* Set up execute controls the next two can respond to. */
9927 rcStrict = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient, fStepping);
9928 if (rcStrict != VINF_SUCCESS)
9929 break;
9930
9931 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
9932 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
9933
9934 /*
9935 * Now we can run the guest code.
9936 */
9937 int rcRun = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
9938
9939        /* The guest-CPU context is now outdated; 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
9940
9941 /*
9942 * Restore any residual host-state and save any bits shared between host
9943 * and guest into the guest-CPU state. Re-enables interrupts!
9944 */
9945 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, VBOXSTRICTRC_TODO(rcStrict));
9946
9947 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
9948 if (RT_SUCCESS(rcRun))
9949 { /* very likely */ }
9950 else
9951 {
9952 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
9953 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rcRun, pCtx, &VmxTransient);
9954 return rcRun;
9955 }
9956
9957 /* Profile the VM-exit. */
9958 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
9959 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
9960 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
9961 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
9962 HMVMX_START_EXIT_DISPATCH_PROF();
9963
9964 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
9965
9966 /*
9967 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitDebug().
9968 */
9969 rcStrict = hmR0VmxRunDebugHandleExit(pVM, pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);
9970 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
9971 if (rcStrict != VINF_SUCCESS)
9972 break;
9973 if (cLoops > pVM->hm.s.cMaxResumeLoops)
9974 {
9975 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9976 rcStrict = VINF_EM_RAW_INTERRUPT;
9977 break;
9978 }
9979
9980 /*
9981 * Stepping: Did the RIP change, if so, consider it a single step.
9982 * Otherwise, make sure one of the TFs gets set.
9983 */
9984 if (fStepping)
9985 {
9986 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
9987 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
9988 AssertRCReturn(rc2, rc2);
9989 if ( pCtx->rip != DbgState.uRipStart
9990 || pCtx->cs.Sel != DbgState.uCsStart)
9991 {
9992 rcStrict = VINF_EM_DBG_STEPPED;
9993 break;
9994 }
9995 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
9996 }
9997
9998 /*
9999         * Update when dtrace settings change (DBGF kicks us, so no need to check).
10000 */
10001 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
10002 hmR0VmxPreRunGuestDebugStateUpdate(pVM, pVCpu, pCtx, &DbgState, &VmxTransient);
10003 }
10004
10005 /*
10006 * Clear the X86_EFL_TF if necessary.
10007 */
10008 if (pVCpu->hm.s.fClearTrapFlag)
10009 {
10010 int rc2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);
10011 AssertRCReturn(rc2, rc2);
10012 pVCpu->hm.s.fClearTrapFlag = false;
10013 pCtx->eflags.Bits.u1TF = 0;
10014 }
10015    /** @todo There seem to be issues with the resume flag when the monitor trap
10016     *        flag is pending without being used. Seen early in BIOS init when
10017     *        accessing the APIC page in protected mode. */
10018
10019 /*
10020 * Restore VM-exit control settings as we may not reenter this function the
10021 * next time around.
10022 */
10023 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
10024
10025 /* Restore HMCPU indicators. */
10026 pVCpu->hm.s.fUsingDebugLoop = false;
10027 pVCpu->hm.s.fDebugWantRdTscExit = false;
10028 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
10029
10030 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
10031 return rcStrict;
10032}
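
/*
 * Illustrative sketch (not part of the build): the single-step detection used
 * in the debug loop above -- a step is considered taken once either RIP or CS
 * differs from the values recorded when stepping started.  The helper name is
 * hypothetical.
 */
#if 0
static bool hmR0VmxHasSteppedSketch(PCPUMCTX pCtx, uint64_t uRipStart, uint16_t uCsStart)
{
    return pCtx->rip    != uRipStart
        || pCtx->cs.Sel != uCsStart;
}
#endif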
10033
10034
10035/** @} */
10036
10037
10038/**
10039 * Checks if any expensive dtrace probes are enabled and we should go to the
10040 * debug loop.
10041 *
10042 * @returns true if we should use debug loop, false if not.
10043 */
10044static bool hmR0VmxAnyExpensiveProbesEnabled(void)
10045{
10046 /* It's probably faster to OR the raw 32-bit counter variables together.
10047 Since the variables are in an array and the probes are next to one
10048       another (more or less), we have good locality.  So, it's better to read
10049       eight or nine cache lines every time and have only one conditional than
10050       128+ conditionals, right? */
10051 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
10052 | VBOXVMM_XCPT_DE_ENABLED_RAW()
10053 | VBOXVMM_XCPT_DB_ENABLED_RAW()
10054 | VBOXVMM_XCPT_BP_ENABLED_RAW()
10055 | VBOXVMM_XCPT_OF_ENABLED_RAW()
10056 | VBOXVMM_XCPT_BR_ENABLED_RAW()
10057 | VBOXVMM_XCPT_UD_ENABLED_RAW()
10058 | VBOXVMM_XCPT_NM_ENABLED_RAW()
10059 | VBOXVMM_XCPT_DF_ENABLED_RAW()
10060 | VBOXVMM_XCPT_TS_ENABLED_RAW()
10061 | VBOXVMM_XCPT_NP_ENABLED_RAW()
10062 | VBOXVMM_XCPT_SS_ENABLED_RAW()
10063 | VBOXVMM_XCPT_GP_ENABLED_RAW()
10064 | VBOXVMM_XCPT_PF_ENABLED_RAW()
10065 | VBOXVMM_XCPT_MF_ENABLED_RAW()
10066 | VBOXVMM_XCPT_AC_ENABLED_RAW()
10067 | VBOXVMM_XCPT_XF_ENABLED_RAW()
10068 | VBOXVMM_XCPT_VE_ENABLED_RAW()
10069 | VBOXVMM_XCPT_SX_ENABLED_RAW()
10070 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
10071 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
10072 ) != 0
10073 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
10074 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
10075 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
10076 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
10077 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
10078 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
10079 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
10080 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
10081 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
10082 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
10083 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
10084 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
10085 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
10086 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
10087 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
10088 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
10089 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
10090 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
10091 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
10092 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
10093 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
10094 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
10095 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
10096 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
10097 | VBOXVMM_INSTR_STR_ENABLED_RAW()
10098 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
10099 | VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
10100 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
10101 | VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
10102 | VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
10103 | VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
10104 | VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
10105 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
10106 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED_RAW()
10107 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED_RAW()
10108 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED_RAW()
10109 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED_RAW()
10110 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED_RAW()
10111 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED_RAW()
10112 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED_RAW()
10113 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED_RAW()
10114 | VBOXVMM_INSTR_VMX_VMXON_ENABLED_RAW()
10115 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED_RAW()
10116 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED_RAW()
10117 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED_RAW()
10118 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED_RAW()
10119 ) != 0
10120 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
10121 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
10122 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
10123 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
10124 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
10125 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
10126 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
10127 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
10128 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
10129 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
10130 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
10131 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
10132 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
10133 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
10134 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
10135 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
10136 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
10137 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
10138 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
10139 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
10140 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
10141 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
10142 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
10143 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
10144 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
10145 | VBOXVMM_EXIT_STR_ENABLED_RAW()
10146 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
10147 | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
10148 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
10149 | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
10150 | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
10151 | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
10152 | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
10153 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
10154 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
10155 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
10156 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
10157 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
10158 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
10159 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
10160 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
10161 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
10162 | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
10163 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
10164 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
10165 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
10166 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
10167 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
10168 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
10169 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
10170 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
10171 ) != 0;
10172}
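
/*
 * Illustrative sketch (not part of the build): the OR-the-raw-flags pattern
 * used above, with a hypothetical array of 32-bit enable counters.  Combining
 * the values first leaves a single conditional instead of one per probe.
 */
#if 0
static bool hmR0VmxAnyFlagSetSketch(uint32_t const *pau32Enables, size_t cEnables)
{
    uint32_t uCombined = 0;
    for (size_t i = 0; i < cEnables; i++)
        uCombined |= pau32Enables[i];   /* No branching while combining... */
    return uCombined != 0;              /* ...only one conditional at the end. */
}
#endif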
10173
10174
10175/**
10176 * Runs the guest code using VT-x.
10177 *
10178 * @returns Strict VBox status code (i.e. informational status codes too).
10179 * @param pVM The cross context VM structure.
10180 * @param pVCpu The cross context virtual CPU structure.
10181 * @param pCtx Pointer to the guest-CPU context.
10182 */
10183VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
10184{
10185 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10186 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);
10187 HMVMX_ASSERT_PREEMPT_SAFE();
10188
10189 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
10190
10191 VBOXSTRICTRC rcStrict;
10192 if ( !pVCpu->hm.s.fUseDebugLoop
10193 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
10194 && !DBGFIsStepping(pVCpu) )
10195 rcStrict = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
10196 else
10197 rcStrict = hmR0VmxRunGuestCodeDebug(pVM, pVCpu, pCtx);
10198
10199 if (rcStrict == VERR_EM_INTERPRETER)
10200 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
10201 else if (rcStrict == VINF_EM_RESET)
10202 rcStrict = VINF_EM_TRIPLE_FAULT;
10203
10204 int rc2 = hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rcStrict);
10205 if (RT_FAILURE(rc2))
10206 {
10207 pVCpu->hm.s.u32HMError = (uint32_t)VBOXSTRICTRC_VAL(rcStrict);
10208 rcStrict = rc2;
10209 }
10210 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
10211 return rcStrict;
10212}
10213
10214
10215#ifndef HMVMX_USE_FUNCTION_TABLE
10216DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
10217{
10218# ifdef DEBUG_ramshankar
10219# define RETURN_EXIT_CALL(a_CallExpr) \
10220 do { \
10221 int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); \
10222 VBOXSTRICTRC rcStrict = a_CallExpr; \
10223 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); \
10224 return rcStrict; \
10225 } while (0)
10226# else
10227# define RETURN_EXIT_CALL(a_CallExpr) return a_CallExpr
10228# endif
10229 switch (rcReason)
10230 {
10231 case VMX_EXIT_EPT_MISCONFIG: RETURN_EXIT_CALL(hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
10232 case VMX_EXIT_EPT_VIOLATION: RETURN_EXIT_CALL(hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
10233 case VMX_EXIT_IO_INSTR: RETURN_EXIT_CALL(hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
10234 case VMX_EXIT_CPUID: RETURN_EXIT_CALL(hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
10235 case VMX_EXIT_RDTSC: RETURN_EXIT_CALL(hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
10236 case VMX_EXIT_RDTSCP: RETURN_EXIT_CALL(hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
10237 case VMX_EXIT_APIC_ACCESS: RETURN_EXIT_CALL(hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
10238 case VMX_EXIT_XCPT_OR_NMI: RETURN_EXIT_CALL(hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
10239 case VMX_EXIT_MOV_CRX: RETURN_EXIT_CALL(hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
10240 case VMX_EXIT_EXT_INT: RETURN_EXIT_CALL(hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
10241 case VMX_EXIT_INT_WINDOW: RETURN_EXIT_CALL(hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
10242 case VMX_EXIT_MWAIT: RETURN_EXIT_CALL(hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
10243 case VMX_EXIT_MONITOR: RETURN_EXIT_CALL(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
10244 case VMX_EXIT_TASK_SWITCH: RETURN_EXIT_CALL(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
10245 case VMX_EXIT_PREEMPT_TIMER: RETURN_EXIT_CALL(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
10246 case VMX_EXIT_RDMSR: RETURN_EXIT_CALL(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
10247 case VMX_EXIT_WRMSR: RETURN_EXIT_CALL(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
10248 case VMX_EXIT_MOV_DRX: RETURN_EXIT_CALL(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
10249 case VMX_EXIT_TPR_BELOW_THRESHOLD: RETURN_EXIT_CALL(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
10250 case VMX_EXIT_HLT: RETURN_EXIT_CALL(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
10251 case VMX_EXIT_INVD: RETURN_EXIT_CALL(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
10252 case VMX_EXIT_INVLPG: RETURN_EXIT_CALL(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
10253 case VMX_EXIT_RSM: RETURN_EXIT_CALL(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
10254 case VMX_EXIT_MTF: RETURN_EXIT_CALL(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
10255 case VMX_EXIT_PAUSE: RETURN_EXIT_CALL(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
10256 case VMX_EXIT_XDTR_ACCESS: RETURN_EXIT_CALL(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
10257 case VMX_EXIT_TR_ACCESS: RETURN_EXIT_CALL(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
10258 case VMX_EXIT_WBINVD: RETURN_EXIT_CALL(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
10259 case VMX_EXIT_XSETBV: RETURN_EXIT_CALL(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
10260 case VMX_EXIT_RDRAND: RETURN_EXIT_CALL(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
10261 case VMX_EXIT_INVPCID: RETURN_EXIT_CALL(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
10262 case VMX_EXIT_GETSEC: RETURN_EXIT_CALL(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
10263 case VMX_EXIT_RDPMC: RETURN_EXIT_CALL(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
10264 case VMX_EXIT_VMCALL: RETURN_EXIT_CALL(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
10265
10266 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient);
10267 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient);
10268 case VMX_EXIT_INIT_SIGNAL: return hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient);
10269 case VMX_EXIT_SIPI: return hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient);
10270 case VMX_EXIT_IO_SMI: return hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient);
10271 case VMX_EXIT_SMI: return hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient);
10272 case VMX_EXIT_ERR_MSR_LOAD: return hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient);
10273 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient);
10274 case VMX_EXIT_ERR_MACHINE_CHECK: return hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient);
10275
10276 case VMX_EXIT_VMCLEAR:
10277 case VMX_EXIT_VMLAUNCH:
10278 case VMX_EXIT_VMPTRLD:
10279 case VMX_EXIT_VMPTRST:
10280 case VMX_EXIT_VMREAD:
10281 case VMX_EXIT_VMRESUME:
10282 case VMX_EXIT_VMWRITE:
10283 case VMX_EXIT_VMXOFF:
10284 case VMX_EXIT_VMXON:
10285 case VMX_EXIT_INVEPT:
10286 case VMX_EXIT_INVVPID:
10287 case VMX_EXIT_VMFUNC:
10288 case VMX_EXIT_XSAVES:
10289 case VMX_EXIT_XRSTORS:
10290 return hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
10291 case VMX_EXIT_RESERVED_60:
10292 case VMX_EXIT_RDSEED: /* only spurious exits, so undefined */
10293 case VMX_EXIT_RESERVED_62:
10294 default:
10295 return hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
10296 }
10297#undef RETURN_EXIT_CALL
10298}
10299#endif /* !HMVMX_USE_FUNCTION_TABLE */
10300
10301
10302#ifdef VBOX_STRICT
10303/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
10304# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
10305 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
10306
10307# define HMVMX_ASSERT_PREEMPT_CPUID() \
10308 do { \
10309 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
10310 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
10311 } while (0)
10312
10313# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
10314 do { \
10315 AssertPtr(pVCpu); \
10316 AssertPtr(pMixedCtx); \
10317 AssertPtr(pVmxTransient); \
10318 Assert(pVmxTransient->fVMEntryFailed == false); \
10319 Assert(ASMIntAreEnabled()); \
10320 HMVMX_ASSERT_PREEMPT_SAFE(); \
10321 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
10322 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
10323 HMVMX_ASSERT_PREEMPT_SAFE(); \
10324 if (VMMR0IsLogFlushDisabled(pVCpu)) \
10325 HMVMX_ASSERT_PREEMPT_CPUID(); \
10326 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10327 } while (0)
10328
10329# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
10330 do { \
10331 Log4Func(("\n")); \
10332 } while (0)
10333#else /* nonstrict builds: */
10334# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
10335 do { \
10336 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10337 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
10338 } while (0)
10339# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
10340#endif
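
/*
 * Illustrative sketch (not part of the build, strict builds only): the
 * intended usage of the preemption CPUID assertion pair above -- capture the
 * CPU id on entry and re-check it after code that must not have been
 * rescheduled.  The function name is hypothetical.
 */
#if 0
static void hmR0VmxPreemptCpuIdSketch(PVMCPU pVCpu)
{
    HMVMX_ASSERT_PREEMPT_CPUID_VAR();   /* Remembers the current CPU (or NIL if preemption is enabled). */
    /* ... work that must stay on the same CPU ... */
    if (VMMR0IsLogFlushDisabled(pVCpu))
        HMVMX_ASSERT_PREEMPT_CPUID();   /* Asserts that we were not migrated in the meantime. */
}
#endif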
10341
10342
10343/**
10344 * Advances the guest RIP after reading it from the VMCS.
10345 *
10346 * @returns VBox status code, no informational status codes.
10347 * @param pVCpu The cross context virtual CPU structure.
10348 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
10349 * out-of-sync. Make sure to update the required fields
10350 * before using them.
10351 * @param pVmxTransient Pointer to the VMX transient structure.
10352 *
10353 * @remarks No-long-jump zone!!!
10354 */
10355DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10356{
10357 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10358 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
10359 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
10360 AssertRCReturn(rc, rc);
10361
10362 pMixedCtx->rip += pVmxTransient->cbInstr;
10363 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
10364
10365 /*
10366 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
10367 * pending debug exception field as it takes care of priority of events.
10368 *
10369 * See Intel spec. 32.2.1 "Debug Exceptions".
10370 */
10371 if ( !pVCpu->hm.s.fSingleInstruction
10372 && pMixedCtx->eflags.Bits.u1TF)
10373 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
10374
10375 return VINF_SUCCESS;
10376}
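
/*
 * Illustrative sketch (not part of the build): the shape of a trivial
 * instruction VM-exit handler that simply skips the instruction using
 * hmR0VmxAdvanceGuestRip() above.  The handler name is hypothetical; the real
 * handlers further down follow the same pattern.
 */
#if 0
HMVMX_EXIT_DECL hmR0VmxExitNopInstrSketch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    int rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif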
10377
10378
10379/**
10380 * Tries to determine which part of the guest state VT-x has deemed invalid
10381 * and updates the error record fields accordingly.
10382 *
10383 * @return VMX_IGS_* return codes.
10384 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
10385 * wrong with the guest state.
10386 *
10387 * @param pVM The cross context VM structure.
10388 * @param pVCpu The cross context virtual CPU structure.
10389 * @param pCtx Pointer to the guest-CPU state.
10390 *
10391 * @remarks This function assumes our cache of the VMCS controls
10392 * is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
10393 */
10394static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
10395{
10396#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
10397#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
10398 uError = (err); \
10399 break; \
10400 } else do { } while (0)
10401
10402 int rc;
10403 uint32_t uError = VMX_IGS_ERROR;
10404 uint32_t u32Val;
10405 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
10406
10407 do
10408 {
10409 /*
10410 * CR0.
10411 */
10412 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10413 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10414 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
10415 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
10416 if (fUnrestrictedGuest)
10417 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
10418
10419 uint32_t u32GuestCR0;
10420 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCR0);
10421 AssertRCBreak(rc);
10422 HMVMX_CHECK_BREAK((u32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
10423 HMVMX_CHECK_BREAK(!(u32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);
10424 if ( !fUnrestrictedGuest
10425 && (u32GuestCR0 & X86_CR0_PG)
10426 && !(u32GuestCR0 & X86_CR0_PE))
10427 {
10428 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
10429 }
10430
10431 /*
10432 * CR4.
10433 */
10434 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10435 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10436
10437 uint32_t u32GuestCR4;
10438 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCR4);
10439 AssertRCBreak(rc);
10440 HMVMX_CHECK_BREAK((u32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
10441 HMVMX_CHECK_BREAK(!(u32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);
10442
10443 /*
10444 * IA32_DEBUGCTL MSR.
10445 */
10446 uint64_t u64Val;
10447 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
10448 AssertRCBreak(rc);
10449 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
10450 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
10451 {
10452 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
10453 }
10454 uint64_t u64DebugCtlMsr = u64Val;
10455
10456#ifdef VBOX_STRICT
10457 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
10458 AssertRCBreak(rc);
10459 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
10460#endif
10461 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
10462
10463 /*
10464 * RIP and RFLAGS.
10465 */
10466 uint32_t u32Eflags;
10467#if HC_ARCH_BITS == 64
10468 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
10469 AssertRCBreak(rc);
10470 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
10471 if ( !fLongModeGuest
10472 || !pCtx->cs.Attr.n.u1Long)
10473 {
10474 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
10475 }
10476 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
10477 * must be identical if the "IA-32e mode guest" VM-entry
10478 * control is 1 and CS.L is 1. No check applies if the
10479 * CPU supports 64 linear-address bits. */
10480
10481 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
10482 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
10483 AssertRCBreak(rc);
10484 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
10485 VMX_IGS_RFLAGS_RESERVED);
10486 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10487 u32Eflags = u64Val;
10488#else
10489 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
10490 AssertRCBreak(rc);
10491 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
10492 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10493#endif
10494
10495 if ( fLongModeGuest
10496 || ( fUnrestrictedGuest
10497 && !(u32GuestCR0 & X86_CR0_PE)))
10498 {
10499 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
10500 }
10501
10502 uint32_t u32EntryInfo;
10503 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
10504 AssertRCBreak(rc);
10505 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
10506 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
10507 {
10508 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
10509 }
10510
10511 /*
10512 * 64-bit checks.
10513 */
10514#if HC_ARCH_BITS == 64
10515 if (fLongModeGuest)
10516 {
10517 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
10518 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
10519 }
10520
10521 if ( !fLongModeGuest
10522 && (u32GuestCR4 & X86_CR4_PCIDE))
10523 {
10524 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
10525 }
10526
10527 /** @todo CR3 field must be such that bits 63:52 and bits in the range
10528 * 51:32 beyond the processor's physical-address width are 0. */
10529
10530 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
10531 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
10532 {
10533 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
10534 }
10535
10536 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
10537 AssertRCBreak(rc);
10538 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
10539
10540 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
10541 AssertRCBreak(rc);
10542 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
10543#endif
10544
10545 /*
10546 * PERF_GLOBAL MSR.
10547 */
10548 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
10549 {
10550 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
10551 AssertRCBreak(rc);
10552 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
10553 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
10554 }
10555
10556 /*
10557 * PAT MSR.
10558 */
10559 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
10560 {
10561 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
10562 AssertRCBreak(rc);
10563 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
10564 for (unsigned i = 0; i < 8; i++)
10565 {
10566 uint8_t u8Val = (u64Val & 0xff);
10567 if ( u8Val != 0 /* UC */
10568 && u8Val != 1 /* WC */
10569 && u8Val != 4 /* WT */
10570 && u8Val != 5 /* WP */
10571 && u8Val != 6 /* WB */
10572 && u8Val != 7 /* UC- */)
10573 {
10574 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
10575 }
10576 u64Val >>= 8;
10577 }
10578 }
10579
10580 /*
10581 * EFER MSR.
10582 */
10583 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
10584 {
10585 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
10586 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
10587 AssertRCBreak(rc);
10588 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
10589 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
10590 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVCpu->hm.s.vmx.u32EntryCtls
10591 & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
10592 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
10593 HMVMX_CHECK_BREAK( fUnrestrictedGuest
10594 || !(u32GuestCR0 & X86_CR0_PG)
10595 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
10596 VMX_IGS_EFER_LMA_LME_MISMATCH);
10597 }
10598
10599 /*
10600 * Segment registers.
10601 */
10602 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10603 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
10604 if (!(u32Eflags & X86_EFL_VM))
10605 {
10606 /* CS */
10607 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
10608 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
10609 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
10610 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
10611 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10612 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
10613 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10614 /* CS cannot be loaded with NULL in protected mode. */
10615 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
10616 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
10617 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
10618 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
10619 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
10620 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
10621 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
10622 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
10623 else
10624 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
10625
10626 /* SS */
10627 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10628 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
10629 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
10630 if ( !(pCtx->cr0 & X86_CR0_PE)
10631 || pCtx->cs.Attr.n.u4Type == 3)
10632 {
10633 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
10634 }
10635 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
10636 {
10637 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
10638 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
10639 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
10640 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
10641 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
10642 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10643 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
10644 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10645 }
10646
10647 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
10648 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
10649 {
10650 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
10651 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
10652 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10653 || pCtx->ds.Attr.n.u4Type > 11
10654 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10655 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
10656 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
10657 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
10658 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10659 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
10660 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10661 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10662 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
10663 }
10664 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
10665 {
10666 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
10667 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
10668 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10669 || pCtx->es.Attr.n.u4Type > 11
10670 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10671 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
10672 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
10673 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
10674 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10675 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
10676 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10677 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10678 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
10679 }
10680 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
10681 {
10682 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
10683 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
10684 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10685 || pCtx->fs.Attr.n.u4Type > 11
10686 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
10687 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
10688 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
10689 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
10690 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10691 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
10692 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10693 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10694 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
10695 }
10696 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
10697 {
10698 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
10699 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
10700 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10701 || pCtx->gs.Attr.n.u4Type > 11
10702 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
10703 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
10704 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
10705 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
10706 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10707 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
10708 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10709 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10710 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
10711 }
10712 /* 64-bit capable CPUs. */
10713#if HC_ARCH_BITS == 64
10714 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10715 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10716 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10717 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10718 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10719 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
10720 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10721 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
10722 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10723 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
10724 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10725#endif
10726 }
10727 else
10728 {
10729 /* V86 mode checks. */
10730 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
10731 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10732 {
10733 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
10734 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
10735 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
10736 }
10737 else
10738 {
10739 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
10740 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
10741 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
10742 }
10743
10744 /* CS */
10745 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
10746 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
10747 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
10748 /* SS */
10749 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
10750 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
10751 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
10752 /* DS */
10753 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
10754 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
10755 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
10756 /* ES */
10757 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
10758 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
10759 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
10760 /* FS */
10761 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
10762 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
10763 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
10764 /* GS */
10765 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
10766 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
10767 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
10768 /* 64-bit capable CPUs. */
10769#if HC_ARCH_BITS == 64
10770 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10771 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10772 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10773 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10774 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10775 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
10776 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10777 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
10778 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10779 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
10780 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10781#endif
10782 }
10783
10784 /*
10785 * TR.
10786 */
10787 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
10788 /* 64-bit capable CPUs. */
10789#if HC_ARCH_BITS == 64
10790 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
10791#endif
10792 if (fLongModeGuest)
10793 {
10794 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
10795 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
10796 }
10797 else
10798 {
10799 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
10800 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
10801 VMX_IGS_TR_ATTR_TYPE_INVALID);
10802 }
10803 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
10804 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
10805 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
10806 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
10807 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10808 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
10809 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10810 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
10811
10812 /*
10813 * GDTR and IDTR.
10814 */
10815#if HC_ARCH_BITS == 64
10816 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
10817 AssertRCBreak(rc);
10818 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
10819
10820 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
10821 AssertRCBreak(rc);
10822 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
10823#endif
10824
10825 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
10826 AssertRCBreak(rc);
10827 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10828
10829 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
10830 AssertRCBreak(rc);
10831 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10832
10833 /*
10834 * Guest Non-Register State.
10835 */
10836 /* Activity State. */
10837 uint32_t u32ActivityState;
10838 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
10839 AssertRCBreak(rc);
10840 HMVMX_CHECK_BREAK( !u32ActivityState
10841 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
10842 VMX_IGS_ACTIVITY_STATE_INVALID);
10843 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
10844 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
10845 uint32_t u32IntrState;
10846 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
10847 AssertRCBreak(rc);
10848 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
10849 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10850 {
10851 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
10852 }
10853
10854 /** @todo Activity state and injecting interrupts. Left as a todo since we
10855         * currently don't use any activity state other than ACTIVE. */
10856
10857 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
10858 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
10859
10860 /* Guest interruptibility-state. */
10861 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
10862 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
10863 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
10864 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
10865 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10866 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
10867 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
10868 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
10869 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
10870 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
10871 {
10872 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
10873 {
10874 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10875 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10876 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
10877 }
10878 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10879 {
10880 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10881 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
10882 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
10883 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
10884 }
10885 }
10886 /** @todo Assumes the processor is not in SMM. */
10887 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
10888 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
10889 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
10890 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
10891 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
10892 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
10893 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
10894 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10895 {
10896 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
10897 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
10898 }
10899
10900 /* Pending debug exceptions. */
10901#if HC_ARCH_BITS == 64
10902 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
10903 AssertRCBreak(rc);
10904 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
10905 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
10906 u32Val = u64Val; /* For pending debug exceptions checks below. */
10907#else
10908 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
10909 AssertRCBreak(rc);
10910 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
10911 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
10912#endif
10913
10914 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10915 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
10916 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
10917 {
10918 if ( (u32Eflags & X86_EFL_TF)
10919 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10920 {
10921 /* Bit 14 is PendingDebug.BS. */
10922 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
10923 }
10924 if ( !(u32Eflags & X86_EFL_TF)
10925 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10926 {
10927 /* Bit 14 is PendingDebug.BS. */
10928 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
10929 }
10930 }
10931
10932 /* VMCS link pointer. */
10933 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
10934 AssertRCBreak(rc);
10935 if (u64Val != UINT64_C(0xffffffffffffffff))
10936 {
10937 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
10938 /** @todo Bits beyond the processor's physical-address width MBZ. */
10939            /** @todo The 32-bit value in memory referenced by this field (as a physical
10940             *        address) must contain the processor's VMCS revision ID. */
10941 /** @todo SMM checks. */
10942 }
10943
10944 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
10945 * not using Nested Paging? */
10946 if ( pVM->hm.s.fNestedPaging
10947 && !fLongModeGuest
10948 && CPUMIsGuestInPAEModeEx(pCtx))
10949 {
10950 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
10951 AssertRCBreak(rc);
10952 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10953
10954 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
10955 AssertRCBreak(rc);
10956 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10957
10958 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
10959 AssertRCBreak(rc);
10960 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10961
10962 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
10963 AssertRCBreak(rc);
10964 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10965 }
10966
10967 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
10968 if (uError == VMX_IGS_ERROR)
10969 uError = VMX_IGS_REASON_NOT_FOUND;
10970 } while (0);
10971
10972 pVCpu->hm.s.u32HMError = uError;
10973 return uError;
10974
10975#undef HMVMX_ERROR_BREAK
10976#undef HMVMX_CHECK_BREAK
10977}
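
/*
 * Illustrative sketch (not part of the build): the guest PAT MSR check done in
 * hmR0VmxCheckGuestState() above, factored into a standalone helper.  Valid
 * memory-type encodings are UC(0), WC(1), WT(4), WP(5), WB(6) and UC-(7); the
 * helper name is hypothetical.
 */
#if 0
static bool hmR0VmxIsValidGuestPatSketch(uint64_t uGuestPatMsr)
{
    for (unsigned i = 0; i < 8; i++)
    {
        uint8_t const u8Type = uGuestPatMsr & 0xff;
        if (   u8Type != 0 /* UC  */
            && u8Type != 1 /* WC  */
            && u8Type != 4 /* WT  */
            && u8Type != 5 /* WP  */
            && u8Type != 6 /* WB  */
            && u8Type != 7 /* UC- */)
            return false;
        uGuestPatMsr >>= 8;
    }
    return true;
}
#endif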
10978
10979/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10980/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
10981/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10982
10983/** @name VM-exit handlers.
10984 * @{
10985 */
10986
10987/**
10988 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
10989 */
10990HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10991{
10992 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
10993 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
10994 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
10995 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
10996 return VINF_SUCCESS;
10997 return VINF_EM_RAW_INTERRUPT;
10998}
10999
11000
11001/**
11002 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
11003 */
11004HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11005{
11006 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11007 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
11008
11009 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11010 AssertRCReturn(rc, rc);
11011
11012 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
11013 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
11014 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
11015 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
11016
11017 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
11018 {
11019 /*
11020         * This cannot be a guest NMI: the only way for the guest to receive an NMI is if we injected it ourselves, and
11021         * anything we inject will not cause a VM-exit directly for the event being injected.
11022 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
11023 *
11024 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
11025 */
11026 VMXDispatchHostNmi();
11027 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
11028 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11029 return VINF_SUCCESS;
11030 }
11031
11032 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11033 VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11034 if (RT_UNLIKELY(rcStrictRc1 == VINF_SUCCESS))
11035 { /* likely */ }
11036 else
11037 {
11038 if (rcStrictRc1 == VINF_HM_DOUBLE_FAULT)
11039 rcStrictRc1 = VINF_SUCCESS;
11040 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11041 return rcStrictRc1;
11042 }
11043
11044 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
11045 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
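          /*
           * Dispatch on the interruption type. Privileged software exceptions (#DB from ICEBP) and software
           * exceptions (#BP, #OF) deliberately fall through to the same per-vector handling as hardware exceptions.
           */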
11046 switch (uIntType)
11047 {
11048 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
11049 Assert(uVector == X86_XCPT_DB);
11050 /* no break */
11051 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
11052 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
11053 /* no break */
11054 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
11055 {
11056 switch (uVector)
11057 {
11058 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
11059 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
11060 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
11061 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
11062 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
11063 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
11064 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient); break;
11065
11066 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
11067 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11068 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
11069 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11070 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
11071 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11072 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
11073 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11074 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
11075 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11076 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
11077 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11078 default:
11079 {
11080 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11081 AssertRCReturn(rc, rc);
11082
11083 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
11084 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11085 {
11086 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
11087 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
11088 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11089
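                        /* Real-on-V86 mode (no unrestricted guest): reflect the exception back to the guest as a
                           pending event; it will be injected on the next VM-entry. */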
11090 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11091 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11092 AssertRCReturn(rc, rc);
11093 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
11094 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
11095 0 /* GCPtrFaultAddress */);
11096 AssertRCReturn(rc, rc);
11097 }
11098 else
11099 {
11100 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
11101 pVCpu->hm.s.u32HMError = uVector;
11102 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
11103 }
11104 break;
11105 }
11106 }
11107 break;
11108 }
11109
11110 default:
11111 {
11112 pVCpu->hm.s.u32HMError = uExitIntInfo;
11113 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
11114 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
11115 break;
11116 }
11117 }
11118 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11119 return rc;
11120}
11121
11122
11123/**
11124 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
11125 */
11126HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11127{
11128 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11129
11130    /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
11131 hmR0VmxClearIntWindowExitVmcs(pVCpu);
11132
11133 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
11134 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
11135 return VINF_SUCCESS;
11136}
11137
11138
11139/**
11140 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
11141 */
11142HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11143{
11144 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11145 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
11146 {
11147 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
11148 HMVMX_RETURN_UNEXPECTED_EXIT();
11149 }
11150
11151 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
11152
11153 /*
11154 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
11155 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
11156 */
11157 uint32_t uIntrState = 0;
11158 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
11159 AssertRCReturn(rc, rc);
11160
11161 bool const fBlockSti = RT_BOOL(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
11162 if ( fBlockSti
11163 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
11164 {
11165 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
11166 }
11167
11168    /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
11169 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
11170
11171 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
11172 return VINF_SUCCESS;
11173}
11174
11175
11176/**
11177 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
11178 */
11179HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11180{
11181 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11182 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
11183 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11184}
11185
11186
11187/**
11188 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
11189 */
11190HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11191{
11192 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11193 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
11194 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11195}
11196
11197
11198/**
11199 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
11200 */
11201HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11202{
11203 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11204 PVM pVM = pVCpu->CTX_SUFF(pVM);
11205 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11206 if (RT_LIKELY(rc == VINF_SUCCESS))
11207 {
11208 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11209 Assert(pVmxTransient->cbInstr == 2);
11210 }
11211 else
11212 {
11213 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
11214 rc = VERR_EM_INTERPRETER;
11215 }
11216 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
11217 return rc;
11218}
11219
11220
11221/**
11222 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
11223 */
11224HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11225{
11226 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11227 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
11228 AssertRCReturn(rc, rc);
11229
11230 if (pMixedCtx->cr4 & X86_CR4_SMXE)
11231 return VINF_EM_RAW_EMULATE_INSTR;
11232
11233 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
11234 HMVMX_RETURN_UNEXPECTED_EXIT();
11235}
11236
11237
11238/**
11239 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
11240 */
11241HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11242{
11243 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11244 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
11245 AssertRCReturn(rc, rc);
11246
11247 PVM pVM = pVCpu->CTX_SUFF(pVM);
11248 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11249 if (RT_LIKELY(rc == VINF_SUCCESS))
11250 {
11251 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11252 Assert(pVmxTransient->cbInstr == 2);
11253 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
11254 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
11255 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11256 }
11257 else
11258 rc = VERR_EM_INTERPRETER;
11259 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
11260 return rc;
11261}
11262
11263
11264/**
11265 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
11266 */
11267HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11268{
11269 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11270 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
11271 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
11272 AssertRCReturn(rc, rc);
11273
11274 PVM pVM = pVCpu->CTX_SUFF(pVM);
11275 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
11276 if (RT_SUCCESS(rc))
11277 {
11278 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11279 Assert(pVmxTransient->cbInstr == 3);
11280 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
11281 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
11282 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11283 }
11284 else
11285 {
11286 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
11287 rc = VERR_EM_INTERPRETER;
11288 }
11289 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
11290 return rc;
11291}
11292
11293
11294/**
11295 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
11296 */
11297HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11298{
11299 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11300 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
11301 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
11302 AssertRCReturn(rc, rc);
11303
11304 PVM pVM = pVCpu->CTX_SUFF(pVM);
11305 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11306 if (RT_LIKELY(rc == VINF_SUCCESS))
11307 {
11308 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11309 Assert(pVmxTransient->cbInstr == 2);
11310 }
11311 else
11312 {
11313 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
11314 rc = VERR_EM_INTERPRETER;
11315 }
11316 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
11317 return rc;
11318}
11319
11320
11321/**
11322 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
11323 */
11324HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11325{
11326 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11327 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
11328
11329 if (pVCpu->hm.s.fHypercallsEnabled)
11330 {
11331#if 0
11332 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11333#else
11334 /* Aggressive state sync. for now. */
11335 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
11336 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). */
11337#endif
11338 rc |= hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11339 AssertRCReturn(rc, rc);
11340
11341        /** @todo Pre-incrementing RIP before the hypercall will break once we have to implement
11342         *        continuing hypercalls (e.g. Hyper-V). */
11343 /** @todo r=bird: GIMHypercall will probably have to be able to return
11344 * informational status codes, so it should be made VBOXSTRICTRC. Not
11345 * doing that now because the status code handling isn't clean (i.e.
11346 * if you use RT_SUCCESS(rc) on the result of something, you don't
11347 * return rc in the success case, you return VINF_SUCCESS). */
11348 rc = GIMHypercall(pVCpu, pMixedCtx);
11349 /* If the hypercall changes anything other than guest general-purpose registers,
11350 we would need to reload the guest changed bits here before VM-entry. */
11351 return rc;
11352 }
11353
11354 Log4(("hmR0VmxExitVmcall: Hypercalls not enabled\n"));
11355 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
11356 return VINF_SUCCESS;
11357}
11358
11359
11360/**
11361 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
11362 */
11363HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11364{
11365 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11366 PVM pVM = pVCpu->CTX_SUFF(pVM);
11367 Assert(!pVM->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
11368
11369 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11370 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11371 AssertRCReturn(rc, rc);
11372
11373 VBOXSTRICTRC rcStrict = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
11374 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11375 rcStrict = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11376 else
11377 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
11378 pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
11379 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
11380 return rcStrict;
11381}
11382
11383
11384/**
11385 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
11386 */
11387HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11388{
11389 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11390 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11391 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
11392 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11393 AssertRCReturn(rc, rc);
11394
11395 PVM pVM = pVCpu->CTX_SUFF(pVM);
11396 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11397 if (RT_LIKELY(rc == VINF_SUCCESS))
11398 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11399 else
11400 {
11401 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
11402 rc = VERR_EM_INTERPRETER;
11403 }
11404 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
11405 return rc;
11406}
11407
11408
11409/**
11410 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
11411 */
11412HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11413{
11414 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11415 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11416 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
11417 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11418 AssertRCReturn(rc, rc);
11419
11420 PVM pVM = pVCpu->CTX_SUFF(pVM);
11421 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11422 rc = VBOXSTRICTRC_VAL(rc2);
11423 if (RT_LIKELY( rc == VINF_SUCCESS
11424 || rc == VINF_EM_HALT))
11425 {
11426 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11427 AssertRCReturn(rc3, rc3);
11428
11429 if ( rc == VINF_EM_HALT
11430 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
11431 {
11432 rc = VINF_SUCCESS;
11433 }
11434 }
11435 else
11436 {
11437 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
11438 rc = VERR_EM_INTERPRETER;
11439 }
11440 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
11441 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
11442 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
11443 return rc;
11444}
11445
11446
11447/**
11448 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
11449 */
11450HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11451{
11452 /*
11453 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
11454 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
11455 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
11456 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
11457 */
11458 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11459 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11460 HMVMX_RETURN_UNEXPECTED_EXIT();
11461}
11462
11463
11464/**
11465 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
11466 */
11467HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11468{
11469 /*
11470 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
11471 * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL
11472 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
11473 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
11474 */
11475 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11476 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11477 HMVMX_RETURN_UNEXPECTED_EXIT();
11478}
11479
11480
11481/**
11482 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
11483 */
11484HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11485{
11486 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
11487 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11488 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11489 HMVMX_RETURN_UNEXPECTED_EXIT();
11490}
11491
11492
11493/**
11494 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
11495 */
11496HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11497{
11498 /*
11499 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
11500 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
11501 * See Intel spec. 25.3 "Other Causes of VM-exits".
11502 */
11503 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11504 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11505 HMVMX_RETURN_UNEXPECTED_EXIT();
11506}
11507
11508
11509/**
11510 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
11511 * VM-exit.
11512 */
11513HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11514{
11515 /*
11516 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
11517     * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
11518 *
11519 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
11520 * See Intel spec. "23.8 Restrictions on VMX operation".
11521 */
11522 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11523 return VINF_SUCCESS;
11524}
11525
11526
11527/**
11528 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
11529 * VM-exit.
11530 */
11531HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11532{
11533 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11534 return VINF_EM_RESET;
11535}
11536
11537
11538/**
11539 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
11540 */
11541HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11542{
11543 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11544 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
11545 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
11546 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
11547 AssertRCReturn(rc, rc);
11548
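          /* HLT is a single-byte opcode, so simply step RIP past it rather than reading the instruction length
             from the VMCS. */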
11549 pMixedCtx->rip++;
11550 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
11551 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
11552 rc = VINF_SUCCESS;
11553 else
11554 rc = VINF_EM_HALT;
11555
11556 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11557 if (rc != VINF_SUCCESS)
11558 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
11559 return rc;
11560}
11561
11562
11563/**
11564 * VM-exit handler for instructions that result in a \#UD exception delivered to
11565 * the guest.
11566 */
11567HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11568{
11569 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11570 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
11571 return VINF_SUCCESS;
11572}
11573
11574
11575/**
11576 * VM-exit handler for expiry of the VMX preemption timer.
11577 */
11578HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11579{
11580 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11581
11582 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
11583 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11584
11585 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
11586 PVM pVM = pVCpu->CTX_SUFF(pVM);
11587 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
11588 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
11589 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
11590}
11591
11592
11593/**
11594 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
11595 */
11596HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11597{
11598 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11599
11600 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11601 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);
11602 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
11603 AssertRCReturn(rc, rc);
11604
11605 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
11606 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
11607
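          /* Re-evaluate whether XCR0 must be swapped on VM-entry/VM-exit: only needed when the guest has set
             CR4.OSXSAVE and its XCR0 differs from the host value. */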
11608 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
11609
11610 return rcStrict;
11611}
11612
11613
11614/**
11615 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
11616 */
11617HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11618{
11619 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11620
11621    /* The guest should not invalidate the host CPU's TLBs; fall back to the interpreter. */
11622 /** @todo implement EMInterpretInvpcid() */
11623 return VERR_EM_INTERPRETER;
11624}
11625
11626
11627/**
11628 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
11629 * Error VM-exit.
11630 */
11631HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11632{
11633 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
11634 AssertRCReturn(rc, rc);
11635
11636 rc = hmR0VmxCheckVmcsCtls(pVCpu);
11637 AssertRCReturn(rc, rc);
11638
11639 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11640 NOREF(uInvalidReason);
11641
11642#ifdef VBOX_STRICT
11643 uint32_t uIntrState;
11644 RTHCUINTREG uHCReg;
11645 uint64_t u64Val;
11646 uint32_t u32Val;
11647
11648 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
11649 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
11650 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
11651 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
11652 AssertRCReturn(rc, rc);
11653
11654 Log4(("uInvalidReason %u\n", uInvalidReason));
11655 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
11656 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
11657 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
11658 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
11659
11660 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
11661 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
11662 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
11663 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
11664 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
11665    Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
11666 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
11667 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
11668 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
11669 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
11670 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
11671 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
11672#else
11673 NOREF(pVmxTransient);
11674#endif
11675
11676 HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
11677 return VERR_VMX_INVALID_GUEST_STATE;
11678}
11679
11680
11681/**
11682 * VM-exit handler for VM-entry failure due to an MSR-load
11683 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
11684 */
11685HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11686{
11687 NOREF(pVmxTransient);
11688 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
11689 HMVMX_RETURN_UNEXPECTED_EXIT();
11690}
11691
11692
11693/**
11694 * VM-exit handler for VM-entry failure due to a machine-check event
11695 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
11696 */
11697HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11698{
11699 NOREF(pVmxTransient);
11700 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
11701 HMVMX_RETURN_UNEXPECTED_EXIT();
11702}
11703
11704
11705/**
11706 * VM-exit handler for all undefined reasons. Should never ever happen... in
11707 * theory.
11708 */
11709HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11710{
11711 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
11712 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
11713 return VERR_VMX_UNDEFINED_EXIT_CODE;
11714}
11715
11716
11717/**
11718 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
11719 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
11720 * Conditional VM-exit.
11721 */
11722HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11723{
11724 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11725
11726 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
11727 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
11728 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
11729 return VERR_EM_INTERPRETER;
11730 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11731 HMVMX_RETURN_UNEXPECTED_EXIT();
11732}
11733
11734
11735/**
11736 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
11737 */
11738HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11739{
11740 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11741
11742 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
11743 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
11744 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
11745 return VERR_EM_INTERPRETER;
11746 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11747 HMVMX_RETURN_UNEXPECTED_EXIT();
11748}
11749
11750
11751/**
11752 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
11753 */
11754HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11755{
11756 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11757
11758 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
11759 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11760 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
11761 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11762 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
11763 {
11764 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
11765 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
11766 }
11767 AssertRCReturn(rc, rc);
11768 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
11769
11770#ifdef VBOX_STRICT
11771 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
11772 {
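              /* With MSR bitmaps active, MSRs in the auto-load/store area (except EFER) and, on 64-bit hosts, the
                 lazily restored MSRs are passed through, so a RDMSR VM-exit for one of them indicates a bitmap
                 setup bug. */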
11773 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
11774 && pMixedCtx->ecx != MSR_K6_EFER)
11775 {
11776 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
11777 pMixedCtx->ecx));
11778 HMVMX_RETURN_UNEXPECTED_EXIT();
11779 }
11780# if HC_ARCH_BITS == 64
11781 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
11782 && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
11783 {
11784 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
11785 HMVMX_RETURN_UNEXPECTED_EXIT();
11786 }
11787# endif
11788 }
11789#endif
11790
11791 PVM pVM = pVCpu->CTX_SUFF(pVM);
11792 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11793 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
11794 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
11795 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
11796 if (RT_SUCCESS(rc))
11797 {
11798 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11799 Assert(pVmxTransient->cbInstr == 2);
11800 }
11801 return rc;
11802}
11803
11804
11805/**
11806 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
11807 */
11808HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11809{
11810 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11811 PVM pVM = pVCpu->CTX_SUFF(pVM);
11812 int rc = VINF_SUCCESS;
11813
11814 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
11815 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
11816 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
11817 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
11818 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
11819 {
11820 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
11821 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
11822 }
11823 AssertRCReturn(rc, rc);
11824 Log4(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
11825
11826 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11827 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
11828 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
11829
11830 if (RT_SUCCESS(rc))
11831 {
11832 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11833
11834 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
11835 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
11836 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
11837 {
11838            /* We've already saved the APIC-related guest state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
11839             * virtualization is implemented we'll have to make sure the APIC state is saved from the VMCS before
11840             * EMInterpretWrmsr() changes it. */
11841 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11842 }
11843 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
11844 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11845 else if (pMixedCtx->ecx == MSR_K6_EFER)
11846 {
11847 /*
11848 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
11849 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
11850 * the other bits as well, SCE and NXE. See @bugref{7368}.
11851 */
11852 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
11853 }
11854
11855 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
11856 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
11857 {
11858 switch (pMixedCtx->ecx)
11859 {
11860 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
11861 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
11862 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
11863 case MSR_K8_FS_BASE: /* no break */
11864 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break;
11865 case MSR_K6_EFER: /* already handled above */ break;
11866 default:
11867 {
11868 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
11869 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
11870#if HC_ARCH_BITS == 64
11871 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
11872 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
11873#endif
11874 break;
11875 }
11876 }
11877 }
11878#ifdef VBOX_STRICT
11879 else
11880 {
11881            /* Paranoia. Validate that writes to MSRs given write passthrough in the MSR bitmap really are not intercepted. */
11882 switch (pMixedCtx->ecx)
11883 {
11884 case MSR_IA32_SYSENTER_CS:
11885 case MSR_IA32_SYSENTER_EIP:
11886 case MSR_IA32_SYSENTER_ESP:
11887 case MSR_K8_FS_BASE:
11888 case MSR_K8_GS_BASE:
11889 {
11890 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
11891 HMVMX_RETURN_UNEXPECTED_EXIT();
11892 }
11893
11894 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
11895 default:
11896 {
11897 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
11898 {
11899 /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
11900 if (pMixedCtx->ecx != MSR_K6_EFER)
11901 {
11902 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
11903 pMixedCtx->ecx));
11904 HMVMX_RETURN_UNEXPECTED_EXIT();
11905 }
11906 }
11907
11908#if HC_ARCH_BITS == 64
11909 if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
11910 {
11911 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
11912 HMVMX_RETURN_UNEXPECTED_EXIT();
11913 }
11914#endif
11915 break;
11916 }
11917 }
11918 }
11919#endif /* VBOX_STRICT */
11920 }
11921 return rc;
11922}
11923
11924
11925/**
11926 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
11927 */
11928HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11929{
11930 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11931
11932 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
11933 return VINF_EM_RAW_INTERRUPT;
11934}
11935
11936
11937/**
11938 * VM-exit handler for when the TPR value is lowered below the specified
11939 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
11940 */
11941HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11942{
11943 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11944 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
11945
11946 /*
11947     * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
11948 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
11949 * resume guest execution.
11950 */
11951 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
11952 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
11953 return VINF_SUCCESS;
11954}
11955
11956
11957/**
11958 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
11959 * VM-exit.
11960 *
11961 * @retval VINF_SUCCESS when guest execution can continue.
11962 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
11963 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
11964 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
11965 * interpreter.
11966 */
11967HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11968{
11969 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11970 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
11971 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11972 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11973 AssertRCReturn(rc, rc);
11974
11975 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
11976 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
11977 PVM pVM = pVCpu->CTX_SUFF(pVM);
11978 VBOXSTRICTRC rcStrict;
11979 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/);
11980 switch (uAccessType)
11981 {
11982 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
11983 {
11984 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
11985 AssertRCReturn(rc, rc);
11986
11987 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
11988 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
11989 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
11990 AssertMsg( rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE
11991 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
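                  /* Mark the guest state that IEM may have modified as dirty so it is written back to the VMCS
                     before the next VM-entry. */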
11992 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
11993 {
11994 case 0: /* CR0 */
11995 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
11996 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
11997 break;
11998 case 2: /* CR2 */
11999                /* Nothing to do here; CR2 is not part of the VMCS. */
12000 break;
12001 case 3: /* CR3 */
12002 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop);
12003 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3);
12004 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
12005 break;
12006 case 4: /* CR4 */
12007 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);
12008 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n",
12009 VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
12010 break;
12011 case 8: /* CR8 */
12012 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
12013 /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */
12014 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
12015 break;
12016 default:
12017 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
12018 break;
12019 }
12020
12021 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
12022 break;
12023 }
12024
12025 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
12026 {
12027 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
12028 AssertRCReturn(rc, rc);
12029
12030 Assert( !pVM->hm.s.fNestedPaging
12031 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
12032 || pVCpu->hm.s.fUsingDebugLoop
12033 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
12034
12035 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
12036 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
12037 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
12038
12039 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
12040 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
12041 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
12042 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12043 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
12044 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
12045 VBOXSTRICTRC_VAL(rcStrict)));
12046 break;
12047 }
12048
12049 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
12050 {
12051 AssertRCReturn(rc, rc);
12052 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
12053 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12054 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
12055 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
12056 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12057 break;
12058 }
12059
12060 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
12061 {
12062 AssertRCReturn(rc, rc);
12063 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
12064 VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
12065 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE,
12066 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12067 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
12068 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12069 break;
12070 }
12071
12072 default:
12073 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
12074 VERR_VMX_UNEXPECTED_EXCEPTION);
12075 }
12076
12077 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);
12078 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
12079 NOREF(pVM);
12080 return rcStrict;
12081}
12082
12083
12084/**
12085 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
12086 * VM-exit.
12087 */
12088HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12089{
12090 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12091 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
12092
12093 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12094 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12095 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
12096 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
12097 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
12098 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
12099 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
12100 AssertRCReturn(rc2, rc2);
12101
12102    /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
12103 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
12104 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
12105 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
12106 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
12107 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
12108 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
12109 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
12110 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
12111
12112 /* I/O operation lookup arrays. */
12113 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
12114 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
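          /* Both tables are indexed by the exit-qualification width encoding: 0 = byte, 1 = word, 3 = dword;
             index 2 is invalid and already rejected by the AssertReturn above. */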
12115
12116 VBOXSTRICTRC rcStrict;
12117 uint32_t const cbValue = s_aIOSizes[uIOWidth];
12118 uint32_t const cbInstr = pVmxTransient->cbInstr;
12119 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
12120 PVM pVM = pVCpu->CTX_SUFF(pVM);
12121 if (fIOString)
12122 {
12123#ifdef VBOX_WITH_2ND_IEM_STEP /* This used to cause guru meditations with a Debian 32-bit guest without nested
12124                                 paging (on ATA reads). See @bugref{5752#c158}. Should work now. */
12125 /*
12126 * INS/OUTS - I/O String instruction.
12127 *
12128 * Use instruction-information if available, otherwise fall back on
12129 * interpreting the instruction.
12130 */
12131 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
12132 fIOWrite ? 'w' : 'r'));
12133 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
12134 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
12135 {
12136 rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
12137 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
12138 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12139 AssertRCReturn(rc2, rc2);
12140 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
12141 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
12142 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
12143 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
12144 if (fIOWrite)
12145 {
12146 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
12147 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
12148 }
12149 else
12150 {
12151 /*
12152 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
12153 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
12154 * See Intel Instruction spec. for "INS".
12155 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
12156 */
12157 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
12158 }
12159 }
12160 else
12161 {
12162 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
12163 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12164 AssertRCReturn(rc2, rc2);
12165 rcStrict = IEMExecOne(pVCpu);
12166 }
12167 /** @todo IEM needs to be setting these flags somehow. */
12168 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
12169 fUpdateRipAlready = true;
12170#else
12171 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
12172 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
12173 if (RT_SUCCESS(rcStrict))
12174 {
12175 if (fIOWrite)
12176 {
12177 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
12178 (DISCPUMODE)pDis->uAddrMode, cbValue);
12179 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
12180 }
12181 else
12182 {
12183 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
12184 (DISCPUMODE)pDis->uAddrMode, cbValue);
12185 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
12186 }
12187 }
12188 else
12189 {
12190 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict),
12191 pMixedCtx->rip));
12192 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
12193 }
12194#endif
12195 }
12196 else
12197 {
12198 /*
12199 * IN/OUT - I/O instruction.
12200 */
12201 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
12202 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
12203 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
12204 if (fIOWrite)
12205 {
12206 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
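                  /* If IOM must complete the port write in ring-3, record the pending I/O access so ring-3 can
                     finish it and advance RIP afterwards. */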
12207 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
12208 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
12209 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
12210 }
12211 else
12212 {
12213 uint32_t u32Result = 0;
12214 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
12215 if (IOM_SUCCESS(rcStrict))
12216 {
12217 /* Save result of I/O IN instr. in AL/AX/EAX. */
12218 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
12219 }
12220 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
12221 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
12222 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
12223 }
12224 }
12225
12226 if (IOM_SUCCESS(rcStrict))
12227 {
12228 if (!fUpdateRipAlready)
12229 {
12230 pMixedCtx->rip += cbInstr;
12231 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
12232 }
12233
12234 /*
12235         * INS/OUTS with a REP prefix updates RFLAGS; this could be observed as a triple-fault guru meditation while booting a Fedora 17 64-bit guest.
12236 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
12237 */
12238 if (fIOString)
12239 {
12240 /** @todo Single-step for INS/OUTS with REP prefix? */
12241 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
12242 }
12243 else if ( !fDbgStepping
12244 && fGstStepping)
12245 {
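              /* The guest was single-stepping (EFLAGS.TF) while we were not; queue a pending single-step debug
                 exception so it is delivered once the guest resumes. */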
12246 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
12247 }
12248
12249 /*
12250 * If any I/O breakpoints are armed, we need to check if one triggered
12251 * and take appropriate action.
12252 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
12253 */
12254 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
12255 AssertRCReturn(rc2, rc2);
12256
12257 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
12258 * execution engines about whether hyper BPs and such are pending. */
12259 uint32_t const uDr7 = pMixedCtx->dr[7];
12260 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
12261 && X86_DR7_ANY_RW_IO(uDr7)
12262 && (pMixedCtx->cr4 & X86_CR4_DE))
12263 || DBGFBpIsHwIoArmed(pVM)))
12264 {
12265 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
12266
12267 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
12268 VMMRZCallRing3Disable(pVCpu);
12269 HM_DISABLE_PREEMPT();
12270
12271 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
12272
12273 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
12274 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
12275 {
12276 /* Raise #DB. */
12277 if (fIsGuestDbgActive)
12278 ASMSetDR6(pMixedCtx->dr[6]);
12279 if (pMixedCtx->dr[7] != uDr7)
12280 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
12281
12282 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
12283 }
12284 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
12285 else if ( rcStrict2 != VINF_SUCCESS
12286 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
12287 rcStrict = rcStrict2;
12288
12289 HM_RESTORE_PREEMPT();
12290 VMMRZCallRing3Enable(pVCpu);
12291 }
12292 }
12293
12294#ifdef VBOX_STRICT
12295 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
12296 Assert(!fIOWrite);
12297 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
12298 Assert(fIOWrite);
12299 else
12300 {
12301#if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
12302 * statuses, that the VMM device and some others may return. See
12303 * IOM_SUCCESS() for guidance. */
12304 AssertMsg( RT_FAILURE(rcStrict)
12305 || rcStrict == VINF_SUCCESS
12306 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
12307 || rcStrict == VINF_EM_DBG_BREAKPOINT
12308 || rcStrict == VINF_EM_RAW_GUEST_TRAP
12309 || rcStrict == VINF_EM_RAW_TO_R3
12310 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12311#endif
12312 }
12313#endif
12314
12315 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
12316 return rcStrict;
12317}
12318
12319
12320/**
12321 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
12322 * VM-exit.
12323 */
12324HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12325{
12326 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12327
12328    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
12329 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12330 AssertRCReturn(rc, rc);
12331 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
12332 {
12333 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
12334 AssertRCReturn(rc, rc);
12335 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
12336 {
12337 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
12338
12339 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
12340 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
12341
12342 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
12343 Assert(!pVCpu->hm.s.Event.fPending);
12344 pVCpu->hm.s.Event.fPending = true;
12345 pVCpu->hm.s.Event.u64IntInfo = pVmxTransient->uIdtVectoringInfo;
12346 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
12347 AssertRCReturn(rc, rc);
12348 if (fErrorCodeValid)
12349 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
12350 else
12351 pVCpu->hm.s.Event.u32ErrCode = 0;
12352 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
12353 && uVector == X86_XCPT_PF)
12354 {
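                  /* A page fault must carry the faulting address (CR2) so the event can be re-injected correctly. */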
12355 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
12356 }
12357
12358 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
12359 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12360 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12361 }
12362 }
12363
12364 /** @todo Emulate task switch someday, currently just going back to ring-3 for
12365 * emulation. */
12366 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12367 return VERR_EM_INTERPRETER;
12368}
12369
12370
12371/**
12372 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
12373 */
12374HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12375{
12376 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12377 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
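      /* The monitor-trap-flag control is only armed for single-stepping the guest; disarm it and report the
         completed step to the debugger (VINF_EM_DBG_STEPPED). */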
12378 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
12379 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
12380 AssertRCReturn(rc, rc);
12381 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
12382 return VINF_EM_DBG_STEPPED;
12383}
12384
12385
12386/**
12387 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
12388 */
12389HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12390{
12391 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12392
12393 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12394 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12395 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12396 { /* likely */ }
12397 else
12398 {
12399 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12400 rcStrict1 = VINF_SUCCESS;
12401 return rcStrict1;
12402 }
12403
12404#if 0
12405 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
12406 * just sync the whole thing. */
12407 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12408#else
12409 /* Aggressive state sync. for now. */
12410 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
12411 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
12412 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12413#endif
12414 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12415 AssertRCReturn(rc, rc);
12416
12417    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
12418 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
12419 VBOXSTRICTRC rcStrict2;
12420 switch (uAccessType)
12421 {
12422 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
12423 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
12424 {
12425 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
12426 || VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != 0x80,
12427 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
12428
12429 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
12430 GCPhys &= PAGE_BASE_GC_MASK;
12431 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
12432 PVM pVM = pVCpu->CTX_SUFF(pVM);
12433            Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
12434 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
12435
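            /* Treat the access as MMIO on the virtual-APIC page: hand IOM the guest-physical address derived
               from the APIC base plus the exit-qualification offset; X86_TRAP_PF_RW marks write accesses. */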
12436 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
12437 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
12438 CPUMCTX2CORE(pMixedCtx), GCPhys);
12439 Log4(("ApicAccess rcStrict2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
12440 if ( rcStrict2 == VINF_SUCCESS
12441 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12442 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12443 {
12444 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12445 | HM_CHANGED_GUEST_RSP
12446 | HM_CHANGED_GUEST_RFLAGS
12447 | HM_CHANGED_VMX_GUEST_APIC_STATE);
12448 rcStrict2 = VINF_SUCCESS;
12449 }
12450 break;
12451 }
12452
12453 default:
12454 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
12455 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR;
12456 break;
12457 }
12458
12459 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
12460 if (rcStrict2 != VINF_SUCCESS)
12461 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
12462 return rcStrict2;
12463}
12464
12465
12466/**
12467 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
12468 * VM-exit.
12469 */
12470HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12471{
12472 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12473
12474 /* We should -not- get this VM-exit if the guest's debug registers were active. */
12475 if (pVmxTransient->fWasGuestDebugStateActive)
12476 {
12477 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
12478 HMVMX_RETURN_UNEXPECTED_EXIT();
12479 }
12480
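    /* If we're not single-stepping and the hypervisor isn't using the debug registers itself, switch to the
       guest debug state lazily: stop intercepting MOV DRx, load the guest DRx values and rerun the instruction. */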
12481 if ( !pVCpu->hm.s.fSingleInstruction
12482 && !pVmxTransient->fWasHyperDebugStateActive)
12483 {
12484 Assert(!DBGFIsStepping(pVCpu));
12485 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
12486
12487 /* Don't intercept MOV DRx any more. */
12488 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
12489 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
12490 AssertRCReturn(rc, rc);
12491
12492 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
12493 VMMRZCallRing3Disable(pVCpu);
12494 HM_DISABLE_PREEMPT();
12495
12496 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
12497 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
12498 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
12499
12500 HM_RESTORE_PREEMPT();
12501 VMMRZCallRing3Enable(pVCpu);
12502
12503#ifdef VBOX_WITH_STATISTICS
12504 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12505 AssertRCReturn(rc, rc);
12506 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
12507 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12508 else
12509 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12510#endif
12511 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
12512 return VINF_SUCCESS;
12513 }
12514
12515 /*
12516 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
12517 * Update the segment registers and DR7 from the CPU.
12518 */
12519 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12520 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12521 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
12522 AssertRCReturn(rc, rc);
12523 Log4(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
12524
12525 PVM pVM = pVCpu->CTX_SUFF(pVM);
12526 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
12527 {
12528 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
12529 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
12530 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
12531 if (RT_SUCCESS(rc))
12532 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);
12533 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12534 }
12535 else
12536 {
12537 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
12538 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
12539 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
12540 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12541 }
12542
12543 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
12544 if (RT_SUCCESS(rc))
12545 {
12546 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
12547 AssertRCReturn(rc2, rc2);
12548 return VINF_SUCCESS;
12549 }
12550 return rc;
12551}
12552
12553
12554/**
12555 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
12556 * Conditional VM-exit.
12557 */
12558HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12559{
12560 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12561 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
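    /* With nested paging, MMIO accesses are expected to surface as EPT misconfigurations; forward the access
       to PGM/IOM below, which will emulate or resolve it. */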
12562
12563 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12564 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12565 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12566 { /* likely */ }
12567 else
12568 {
12569 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12570 rcStrict1 = VINF_SUCCESS;
12571 return rcStrict1;
12572 }
12573
12574 RTGCPHYS GCPhys = 0;
12575 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
12576
12577#if 0
12578 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
12579#else
12580 /* Aggressive state sync. for now. */
12581 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
12582 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
12583 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12584#endif
12585 AssertRCReturn(rc, rc);
12586
12587    /*
12588     * If we succeed, resume guest execution.
12589     * If we fail to interpret the instruction because we couldn't get the guest-physical address of the
12590     * page containing the instruction via the guest's page tables (we would invalidate the guest page
12591     * in the host TLB), resume execution anyway; the resulting guest page fault lets the guest handle
12592     * this weird case. See @bugref{6043}.
12593     */
12594 PVM pVM = pVCpu->CTX_SUFF(pVM);
12595 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
12596    Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pMixedCtx->rip, VBOXSTRICTRC_VAL(rcStrict2)));
12597 if ( rcStrict2 == VINF_SUCCESS
12598 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12599 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12600 {
12601 /* Successfully handled MMIO operation. */
12602 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12603 | HM_CHANGED_GUEST_RSP
12604 | HM_CHANGED_GUEST_RFLAGS
12605 | HM_CHANGED_VMX_GUEST_APIC_STATE);
12606 return VINF_SUCCESS;
12607 }
12608 return rcStrict2;
12609}
12610
12611
12612/**
12613 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
12614 * VM-exit.
12615 */
12616HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12617{
12618 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12619 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
12620
12621 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12622 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12623 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12624 { /* likely */ }
12625 else
12626 {
12627 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12628 rcStrict1 = VINF_SUCCESS;
12629 return rcStrict1;
12630 }
12631
12632 RTGCPHYS GCPhys = 0;
12633 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
12634 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12635#if 0
12636 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
12637#else
12638 /* Aggressive state sync. for now. */
12639 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
12640 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
12641 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
12642#endif
12643 AssertRCReturn(rc, rc);
12644
12645 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
12646 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
12647
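    /* Fold the relevant exit-qualification bits into a #PF-style error code so the nested-paging handler
       below can treat the violation like an ordinary page fault. */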
12648 RTGCUINT uErrorCode = 0;
12649 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
12650 uErrorCode |= X86_TRAP_PF_ID;
12651 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
12652 uErrorCode |= X86_TRAP_PF_RW;
12653 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
12654 uErrorCode |= X86_TRAP_PF_P;
12655
12656 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
12657
12658 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
12659 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
12660
12661    /* Handle the page-fault trap for the nested shadow table. */
12662 PVM pVM = pVCpu->CTX_SUFF(pVM);
12663 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
12664 TRPMResetTrap(pVCpu);
12665
12666 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
12667 if ( rcStrict2 == VINF_SUCCESS
12668 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12669 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12670 {
12671 /* Successfully synced our nested page tables. */
12672 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
12673 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
12674 | HM_CHANGED_GUEST_RSP
12675 | HM_CHANGED_GUEST_RFLAGS);
12676 return VINF_SUCCESS;
12677 }
12678
12679 Log4(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
12680 return rcStrict2;
12681}
12682
12683/** @} */
12684
12685/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12686/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
12687/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12688
12689/** @name VM-exit exception handlers.
12690 * @{
12691 */
12692
12693/**
12694 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
12695 */
12696static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12697{
12698 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12699 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
12700
12701 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
12702 AssertRCReturn(rc, rc);
12703
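    /* With CR0.NE clear the guest uses legacy (external) x87 error reporting, i.e. FERR# asserting IRQ 13
       through the PIC, instead of taking #MF directly. */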
12704 if (!(pMixedCtx->cr0 & X86_CR0_NE))
12705 {
12706 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
12707 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
12708
12709 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
12710 * provides VM-exit instruction length. If this causes problem later,
12711 * disassemble the instruction like it's done on AMD-V. */
12712 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
12713 AssertRCReturn(rc2, rc2);
12714 return rc;
12715 }
12716
12717 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12718 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12719 return rc;
12720}
12721
12722
12723/**
12724 * VM-exit exception handler for \#BP (Breakpoint exception).
12725 */
12726static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12727{
12728 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12729 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
12730
12731    /** @todo Try to optimize this by not saving the entire guest state unless
12732 * really needed. */
12733 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12734 AssertRCReturn(rc, rc);
12735
12736 PVM pVM = pVCpu->CTX_SUFF(pVM);
12737 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
12738 if (rc == VINF_EM_RAW_GUEST_TRAP)
12739 {
12740 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12741 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12742 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12743 AssertRCReturn(rc, rc);
12744
12745 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12746 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12747 }
12748
12749 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
12750 return rc;
12751}
12752
12753
12754/**
12755 * VM-exit exception handler for \#AC (alignment check exception).
12756 */
12757static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12758{
12759 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12760
12761 /*
12762 * Re-inject it. We'll detect any nesting before getting here.
12763 */
12764 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12765 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12766 AssertRCReturn(rc, rc);
12767 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
12768
12769 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12770 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12771 return VINF_SUCCESS;
12772}
12773
12774
12775/**
12776 * VM-exit exception handler for \#DB (Debug exception).
12777 */
12778static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12779{
12780 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12781 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
12782 Log6(("XcptDB\n"));
12783
12784 /*
12785     * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
12786 * for processing.
12787 */
12788 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12789 AssertRCReturn(rc, rc);
12790
12791    /* See Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
12792 uint64_t uDR6 = X86_DR6_INIT_VAL;
12793 uDR6 |= ( pVmxTransient->uExitQualification
12794 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
12795
12796 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
12797 if (rc == VINF_EM_RAW_GUEST_TRAP)
12798 {
12799 /*
12800 * The exception was for the guest. Update DR6, DR7.GD and
12801 * IA32_DEBUGCTL.LBR before forwarding it.
12802 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
12803 */
12804 VMMRZCallRing3Disable(pVCpu);
12805 HM_DISABLE_PREEMPT();
12806
12807 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
12808 pMixedCtx->dr[6] |= uDR6;
12809 if (CPUMIsGuestDebugStateActive(pVCpu))
12810 ASMSetDR6(pMixedCtx->dr[6]);
12811
12812 HM_RESTORE_PREEMPT();
12813 VMMRZCallRing3Enable(pVCpu);
12814
12815 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
12816 AssertRCReturn(rc, rc);
12817
12818 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
12819 pMixedCtx->dr[7] &= ~X86_DR7_GD;
12820
12821 /* Paranoia. */
12822 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
12823 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
12824
12825 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
12826 AssertRCReturn(rc, rc);
12827
12828 /*
12829 * Raise #DB in the guest.
12830 *
12831 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use
12832 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP and not the 'normal' #DB.
12833 * Thus it -may- trigger different handling in the CPU (like skipped DPL checks). See @bugref{6398}.
12834 *
12835 * Since ICEBP isn't documented on Intel, see AMD spec. 15.20 "Event Injection".
12836 */
12837 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12838 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12839 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12840 AssertRCReturn(rc, rc);
12841 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12842 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12843 return VINF_SUCCESS;
12844 }
12845
12846 /*
12847     * Not a guest trap, must be a hypervisor-related debug event then.
12848 * Update DR6 in case someone is interested in it.
12849 */
12850 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
12851 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
12852 CPUMSetHyperDR6(pVCpu, uDR6);
12853
12854 return rc;
12855}
12856
12857
12858/**
12859 * VM-exit exception handler for \#NM (Device-not-available exception: floating
12860 * point exception).
12861 */
12862static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12863{
12864 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12865
12866 /* We require CR0 and EFER. EFER is always up-to-date. */
12867 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
12868 AssertRCReturn(rc, rc);
12869
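    /* #NM is intercepted to activate the guest FPU state on demand; if CPUMR0Trap07Handler() loads it
       successfully below, the faulting instruction is simply re-executed with the FPU available. */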
12870    /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
12871 VMMRZCallRing3Disable(pVCpu);
12872 HM_DISABLE_PREEMPT();
12873
12874 /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
12875 if (pVmxTransient->fWasGuestFPUStateActive)
12876 {
12877 rc = VINF_EM_RAW_GUEST_TRAP;
12878 Assert(CPUMIsGuestFPUStateActive(pVCpu) || HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0));
12879 }
12880 else
12881 {
12882#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
12883 Assert(!pVmxTransient->fWasGuestFPUStateActive || pVCpu->hm.s.fUsingDebugLoop);
12884#endif
12885 rc = CPUMR0Trap07Handler(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
12886 Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
12887 }
12888
12889 HM_RESTORE_PREEMPT();
12890 VMMRZCallRing3Enable(pVCpu);
12891
12892 if (rc == VINF_SUCCESS)
12893 {
12894 /* Guest FPU state was activated, we'll want to change CR0 FPU intercepts before the next VM-reentry. */
12895 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);
12896 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
12897 pVCpu->hm.s.fPreloadGuestFpu = true;
12898 }
12899 else
12900 {
12901 /* Forward #NM to the guest. */
12902 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
12903 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12904 AssertRCReturn(rc, rc);
12905 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12906 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
12907 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
12908 }
12909
12910 return VINF_SUCCESS;
12911}
12912
12913
12914/**
12915 * VM-exit exception handler for \#GP (General-protection exception).
12916 *
12917 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
12918 */
12919static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12920{
12921 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12922 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
12923
12924 int rc;
12925 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
12926 { /* likely */ }
12927 else
12928 {
12929#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
12930 Assert(pVCpu->hm.s.fUsingDebugLoop);
12931#endif
12932 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
12933 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12934 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12935 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12936 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12937 AssertRCReturn(rc, rc);
12938 Log4(("#GP Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
12939 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
12940 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12941 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12942 return rc;
12943 }
12944
12945 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
12946 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
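    /* Without unrestricted guest execution, real-mode guest code runs in virtual-8086 mode where IOPL-sensitive
       instructions (CLI/STI/PUSHF/POPF/INT/IRET) raise #GP and must be emulated here. */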
12947
12948 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
12949 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
12950 AssertRCReturn(rc, rc);
12951
12952 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
12953 uint32_t cbOp = 0;
12954 PVM pVM = pVCpu->CTX_SUFF(pVM);
12955 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
12956 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
12957 if (RT_SUCCESS(rc))
12958 {
12959 rc = VINF_SUCCESS;
12960 Assert(cbOp == pDis->cbInstr);
12961 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
12962 switch (pDis->pCurInstr->uOpcode)
12963 {
12964 case OP_CLI:
12965 {
12966 pMixedCtx->eflags.Bits.u1IF = 0;
12967 pMixedCtx->eflags.Bits.u1RF = 0;
12968 pMixedCtx->rip += pDis->cbInstr;
12969 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12970 if ( !fDbgStepping
12971 && pMixedCtx->eflags.Bits.u1TF)
12972 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
12973 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
12974 break;
12975 }
12976
12977 case OP_STI:
12978 {
12979 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
12980 pMixedCtx->eflags.Bits.u1IF = 1;
12981 pMixedCtx->eflags.Bits.u1RF = 0;
12982 pMixedCtx->rip += pDis->cbInstr;
12983 if (!fOldIF)
12984 {
12985 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
12986 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
12987 }
12988 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12989 if ( !fDbgStepping
12990 && pMixedCtx->eflags.Bits.u1TF)
12991 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
12992 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
12993 break;
12994 }
12995
12996 case OP_HLT:
12997 {
12998 rc = VINF_EM_HALT;
12999 pMixedCtx->rip += pDis->cbInstr;
13000 pMixedCtx->eflags.Bits.u1RF = 0;
13001 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13002 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
13003 break;
13004 }
13005
13006 case OP_POPF:
13007 {
13008 Log4(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
13009 uint32_t cbParm;
13010 uint32_t uMask;
13011 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
13012 if (pDis->fPrefix & DISPREFIX_OPSIZE)
13013 {
13014 cbParm = 4;
13015 uMask = 0xffffffff;
13016 }
13017 else
13018 {
13019 cbParm = 2;
13020 uMask = 0xffff;
13021 }
13022
13023 /* Get the stack pointer & pop the contents of the stack onto Eflags. */
13024 RTGCPTR GCPtrStack = 0;
13025 X86EFLAGS Eflags;
13026 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
13027 &GCPtrStack);
13028 if (RT_SUCCESS(rc))
13029 {
13030 Assert(sizeof(Eflags.u32) >= cbParm);
13031 Eflags.u32 = 0;
13032 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm, PGMACCESSORIGIN_HM));
13033 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
13034 }
13035 if (RT_FAILURE(rc))
13036 {
13037 rc = VERR_EM_INTERPRETER;
13038 break;
13039 }
13040 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
13041 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
13042 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
13043 pMixedCtx->esp += cbParm;
13044 pMixedCtx->esp &= uMask;
13045 pMixedCtx->rip += pDis->cbInstr;
13046 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
13047 | HM_CHANGED_GUEST_RSP
13048 | HM_CHANGED_GUEST_RFLAGS);
13049                /* Generate a pending-debug exception when the guest is stepping over POPF, regardless of how
13050 POPF restores EFLAGS.TF. */
13051 if ( !fDbgStepping
13052 && fGstStepping)
13053 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
13054 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
13055 break;
13056 }
13057
13058 case OP_PUSHF:
13059 {
13060 uint32_t cbParm;
13061 uint32_t uMask;
13062 if (pDis->fPrefix & DISPREFIX_OPSIZE)
13063 {
13064 cbParm = 4;
13065 uMask = 0xffffffff;
13066 }
13067 else
13068 {
13069 cbParm = 2;
13070 uMask = 0xffff;
13071 }
13072
13073 /* Get the stack pointer & push the contents of eflags onto the stack. */
13074 RTGCPTR GCPtrStack = 0;
13075 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
13076 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
13077 if (RT_FAILURE(rc))
13078 {
13079 rc = VERR_EM_INTERPRETER;
13080 break;
13081 }
13082 X86EFLAGS Eflags = pMixedCtx->eflags;
13083                /* The RF & VM bits are cleared in the image stored on the stack; see the Intel instruction reference for PUSHF. */
13084 Eflags.Bits.u1RF = 0;
13085 Eflags.Bits.u1VM = 0;
13086
13087 rc = VBOXSTRICTRC_TODO(PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm, PGMACCESSORIGIN_HM));
13088 if (RT_UNLIKELY(rc != VINF_SUCCESS))
13089 {
13090 AssertMsgFailed(("%Rrc\n", rc)); /** @todo allow strict return codes here */
13091 rc = VERR_EM_INTERPRETER;
13092 break;
13093 }
13094 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
13095 pMixedCtx->esp -= cbParm;
13096 pMixedCtx->esp &= uMask;
13097 pMixedCtx->rip += pDis->cbInstr;
13098 pMixedCtx->eflags.Bits.u1RF = 0;
13099 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
13100 | HM_CHANGED_GUEST_RSP
13101 | HM_CHANGED_GUEST_RFLAGS);
13102 if ( !fDbgStepping
13103 && pMixedCtx->eflags.Bits.u1TF)
13104 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
13105 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
13106 break;
13107 }
13108
13109 case OP_IRET:
13110 {
13111 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
13112 * instruction reference. */
13113 RTGCPTR GCPtrStack = 0;
13114 uint32_t uMask = 0xffff;
13115 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
13116 uint16_t aIretFrame[3];
13117 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
13118 {
13119 rc = VERR_EM_INTERPRETER;
13120 break;
13121 }
13122 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
13123 &GCPtrStack);
13124 if (RT_SUCCESS(rc))
13125 {
13126 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame),
13127 PGMACCESSORIGIN_HM));
13128 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
13129 }
13130 if (RT_FAILURE(rc))
13131 {
13132 rc = VERR_EM_INTERPRETER;
13133 break;
13134 }
13135 pMixedCtx->eip = 0;
13136 pMixedCtx->ip = aIretFrame[0];
13137 pMixedCtx->cs.Sel = aIretFrame[1];
13138 pMixedCtx->cs.ValidSel = aIretFrame[1];
13139 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
13140 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
13141 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
13142 pMixedCtx->sp += sizeof(aIretFrame);
13143 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
13144 | HM_CHANGED_GUEST_SEGMENT_REGS
13145 | HM_CHANGED_GUEST_RSP
13146 | HM_CHANGED_GUEST_RFLAGS);
13147 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
13148 if ( !fDbgStepping
13149 && fGstStepping)
13150 hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
13151                Log4(("IRET %#RGv to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
13152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
13153 break;
13154 }
13155
13156 case OP_INT:
13157 {
13158 uint16_t uVector = pDis->Param1.uValue & 0xff;
13159 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
13160 /* INT clears EFLAGS.TF, we must not set any pending debug exceptions here. */
13161 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
13162 break;
13163 }
13164
13165 case OP_INTO:
13166 {
13167 if (pMixedCtx->eflags.Bits.u1OF)
13168 {
13169 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
13170 /* INTO clears EFLAGS.TF, we must not set any pending debug exceptions here. */
13171 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
13172 }
13173 else
13174 {
13175 pMixedCtx->eflags.Bits.u1RF = 0;
13176 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
13177 }
13178 break;
13179 }
13180
13181 default:
13182 {
13183 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
13184 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
13185 EMCODETYPE_SUPERVISOR);
13186 rc = VBOXSTRICTRC_VAL(rc2);
13187 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
13188 /** @todo We have to set pending-debug exceptions here when the guest is
13189 * single-stepping depending on the instruction that was interpreted. */
13190 Log4(("#GP rc=%Rrc\n", rc));
13191 break;
13192 }
13193 }
13194 }
13195 else
13196 rc = VERR_EM_INTERPRETER;
13197
13198 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
13199 ("#GP Unexpected rc=%Rrc\n", rc));
13200 return rc;
13201}
13202
13203
13204/**
13205 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
13206 * the exception reported in the VMX transient structure back into the VM.
13207 *
13208 * @remarks Requires uExitIntInfo in the VMX transient structure to be
13209 * up-to-date.
13210 */
13211static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13212{
13213 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13214#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
13215 Assert(pVCpu->hm.s.fUsingDebugLoop);
13216#endif
13217
13218    /* Re-inject the exception into the guest. This cannot be a double-fault condition, which would have been handled in
13219 hmR0VmxCheckExitDueToEventDelivery(). */
13220 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13221 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13222 AssertRCReturn(rc, rc);
13223 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
13224
13225#ifdef DEBUG_ramshankar
13226 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
13227 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
13228    Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pMixedCtx->cs.Sel, pMixedCtx->rip));
13229#endif
13230
13231 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13232 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13233 return VINF_SUCCESS;
13234}
13235
13236
13237/**
13238 * VM-exit exception handler for \#PF (Page-fault exception).
13239 */
13240static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13241{
13242 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13243 PVM pVM = pVCpu->CTX_SUFF(pVM);
13244 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
13245 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13246 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13247 AssertRCReturn(rc, rc);
13248
13249 if (!pVM->hm.s.fNestedPaging)
13250 { /* likely */ }
13251 else
13252 {
13253#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
13254 Assert(pVCpu->hm.s.fUsingDebugLoop);
13255#endif
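        /* With nested paging the guest normally handles its own page faults; we only get here when all
           exceptions are being intercepted (e.g. the debug loop), so simply reflect the #PF (or raise #DF
           for a vectoring page fault) back into the guest. */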
13256 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
13257 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
13258 {
13259 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
13260 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13261 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
13262 }
13263 else
13264 {
13265 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13266 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
13267 Log4(("Pending #DF due to vectoring #PF. NP\n"));
13268 }
13269 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13270 return rc;
13271 }
13272
13273    /* If it's a vectoring #PF, emulate injection of the original event as PGMTrap0eHandler() is incapable
13274 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
13275 if (pVmxTransient->fVectoringPF)
13276 {
13277 Assert(pVCpu->hm.s.Event.fPending);
13278 return VINF_EM_RAW_INJECT_TRPM_EVENT;
13279 }
13280
13281 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
13282 AssertRCReturn(rc, rc);
13283
13284 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
13285 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
13286
13287 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
13288 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
13289 (RTGCPTR)pVmxTransient->uExitQualification);
13290
13291 Log4(("#PF: rc=%Rrc\n", rc));
13292 if (rc == VINF_SUCCESS)
13293 {
13294        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
13295 /** @todo this isn't quite right, what if guest does lgdt with some MMIO
13296 * memory? We don't update the whole state here... */
13297 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP
13298 | HM_CHANGED_GUEST_RSP
13299 | HM_CHANGED_GUEST_RFLAGS
13300 | HM_CHANGED_VMX_GUEST_APIC_STATE);
13301 TRPMResetTrap(pVCpu);
13302 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
13303 return rc;
13304 }
13305
13306 if (rc == VINF_EM_RAW_GUEST_TRAP)
13307 {
13308 if (!pVmxTransient->fVectoringDoublePF)
13309 {
13310 /* It's a guest page fault and needs to be reflected to the guest. */
13311 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
13312 TRPMResetTrap(pVCpu);
13313 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
13314 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
13315 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13316 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
13317 }
13318 else
13319 {
13320 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13321 TRPMResetTrap(pVCpu);
13322 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
13323 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
13324 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
13325 }
13326
13327 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13328 return VINF_SUCCESS;
13329 }
13330
13331 TRPMResetTrap(pVCpu);
13332 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
13333 return rc;
13334}
13335
13336/** @} */
13337