VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@72969

Last change on this file since 72969 was 72967, checked in by vboxsync, 6 years ago

VMM/HMSVM: bugref:9193 Stop passing pCtx around and use pVCpu->cpum.GstCtx instead where possible.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 576.3 KB
1/* $Id: HMVMXR0.cpp 72967 2018-07-08 10:38:08Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/x86.h>
25#include <iprt/asm-amd64-x86.h>
26#include <iprt/thread.h>
27
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/gim.h>
35#include <VBox/vmm/apic.h>
36#ifdef VBOX_WITH_REM
37# include <VBox/vmm/rem.h>
38#endif
39#include "HMInternal.h"
40#include <VBox/vmm/vm.h>
41#include "HMVMXR0.h"
42#include "dtrace/VBoxVMM.h"
43
44#ifdef DEBUG_ramshankar
45# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
46# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
47# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
48# define HMVMX_ALWAYS_CHECK_GUEST_STATE
49# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
50# define HMVMX_ALWAYS_TRAP_PF
51# define HMVMX_ALWAYS_FLUSH_TLB
52# define HMVMX_ALWAYS_SWAP_EFER
53#endif
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/** Use the function table. */
60#define HMVMX_USE_FUNCTION_TABLE
61
62/** Determine which tagged-TLB flush handler to use. */
63#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
64#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
65#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
66#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
67
68/** @name HMVMX_READ_XXX
69 * Flags to skip redundant reads of some common VMCS fields that are not part of
70 * the guest-CPU or VCPU state but are needed while handling VM-exits.
71 */
72#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
73#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
74#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
75#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
76#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
77#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
78#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
79/** @} */
80
81/**
82 * States of the VMCS.
83 *
84 * This does not reflect all possible VMCS states but currently only those
85 * needed for maintaining the VMCS consistently even when thread-context hooks
86 * are used. Maybe later this can be extended (i.e. Nested Virtualization).
87 */
88#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
89#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
90#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
91
92/**
93 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
94 * guest using hardware-assisted VMX.
95 *
96 * This excludes state like GPRs (other than RSP) which are always swapped
97 * and restored across the world-switch, and also registers like the EFER
98 * MSR which cannot be modified by the guest without causing a VM-exit.
99 */
100#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
101 | CPUMCTX_EXTRN_RFLAGS \
102 | CPUMCTX_EXTRN_RSP \
103 | CPUMCTX_EXTRN_SREG_MASK \
104 | CPUMCTX_EXTRN_TABLE_MASK \
105 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
106 | CPUMCTX_EXTRN_SYSCALL_MSRS \
107 | CPUMCTX_EXTRN_SYSENTER_MSRS \
108 | CPUMCTX_EXTRN_TSC_AUX \
109 | CPUMCTX_EXTRN_OTHER_MSRS \
110 | CPUMCTX_EXTRN_CR0 \
111 | CPUMCTX_EXTRN_CR3 \
112 | CPUMCTX_EXTRN_CR4 \
113 | CPUMCTX_EXTRN_DR7 \
114 | CPUMCTX_EXTRN_HM_VMX_MASK)
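/*
 * Illustrative sketch (not from the original sources): VM-exit handlers pull in only
 * the guest state they actually need by passing a subset of these CPUMCTX_EXTRN_XXX
 * bits to hmR0VmxImportGuestState(), e.g.:
 *
 *     int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
 *     AssertRCReturn(rc, rc);
 *
 * HMVMX_CPUMCTX_EXTRN_ALL is used when the entire guest state kept in the VMCS must be
 * synced back into pVCpu->cpum.GstCtx.
 */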
115
116/**
117 * Exception bitmap mask for real-mode guests (real-on-v86).
118 *
119 * We need to intercept all exceptions manually, except:
120 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
121 * deadlocking due to bugs in Intel CPUs.
122 * - \#PF, which need not be intercepted even in real-mode when we have Nested
123 * Paging support.
124 */
125#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
126 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
127 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
128 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
129 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
130 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
131 | RT_BIT(X86_XCPT_XF))
132
133/** Maximum VM-instruction error number. */
134#define HMVMX_INSTR_ERROR_MAX 28
135
136/** Profiling macro. */
137#ifdef HM_PROFILE_EXIT_DISPATCH
138# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
139# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
140#else
141# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
142# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
143#endif
144
145/** Assert that preemption is disabled or covered by thread-context hooks. */
146#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
147 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
148
149/** Assert that we haven't migrated CPUs when thread-context hooks are not
150 * used. */
151#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
152 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
153 ("Illegal migration! Entered on CPU %u Current %u\n", \
154 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()))
155
156/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
157 * context. */
158#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
159 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
160 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
161
162/** Helper macro for VM-exit handlers called unexpectedly. */
163#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_pVmxTransient) \
164 do { \
165 (a_pVCpu)->hm.s.u32HMError = (a_pVmxTransient)->uExitReason; \
166 return VERR_VMX_UNEXPECTED_EXIT; \
167 } while (0)
168
169/** Macro for importing segment registers from the VMCS into the guest-CPU context. */
170#ifdef VMX_USE_CACHED_VMCS_ACCESSES
171# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
172 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
173 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
174#else
175# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
176 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
177 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
178#endif
179
180/** Macro for exporting segment registers to the VMCS from the guest-CPU context. */
181# define HMVMX_EXPORT_SREG(Sel, a_pCtxSelReg) \
182 hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
183 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
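/*
 * Illustrative expansion (not part of the original file): exporting the guest CS register,
 * assuming a guest-CPU context pointer 'pCtx', would be written as
 *
 *     rc = HMVMX_EXPORT_SREG(CS, &pCtx->cs);
 *
 * which the preprocessor expands to
 *
 *     rc = hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_CS_SEL, VMX_VMCS32_GUEST_CS_LIMIT,
 *                                       VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pCtx->cs);
 */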
184
185
186/*********************************************************************************************************************************
187* Structures and Typedefs *
188*********************************************************************************************************************************/
189/**
190 * VMX transient state.
191 *
192 * A state structure for holding miscellaneous information across
193 * VMX non-root operation and restored after the transition.
194 */
195typedef struct VMXTRANSIENT
196{
197 /** The host's rflags/eflags. */
198 RTCCUINTREG fEFlags;
199#if HC_ARCH_BITS == 32
200 uint32_t u32Alignment0;
201#endif
202 /** The guest's TPR value used for TPR shadowing. */
203 uint8_t u8GuestTpr;
204 /** Alignment. */
205 uint8_t abAlignment0[7];
206
207 /** The basic VM-exit reason. */
208 uint16_t uExitReason;
209 /** Alignment. */
210 uint16_t u16Alignment0;
211 /** The VM-exit interruption error code. */
212 uint32_t uExitIntErrorCode;
213 /** The VM-exit qualification. */
214 uint64_t uExitQualification;
215
216 /** The VM-exit interruption-information field. */
217 uint32_t uExitIntInfo;
218 /** The VM-exit instruction-length field. */
219 uint32_t cbInstr;
220 /** The VM-exit instruction-information field. */
221 union
222 {
223 /** Plain unsigned int representation. */
224 uint32_t u;
225 /** INS and OUTS information. */
226 struct
227 {
228 uint32_t u7Reserved0 : 7;
229 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
230 uint32_t u3AddrSize : 3;
231 uint32_t u5Reserved1 : 5;
232 /** The segment register (X86_SREG_XXX). */
233 uint32_t iSegReg : 3;
234 uint32_t uReserved2 : 14;
235 } StrIo;
236 /** INVEPT, INVVPID, INVPCID information. */
237 struct
238 {
239 /** Scaling; 0=no scaling, 1=scale-by-2, 2=scale-by-4, 3=scale-by-8. */
240 uint32_t u2Scaling : 2;
241 uint32_t u5Reserved0 : 5;
242 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
243 uint32_t u3AddrSize : 3;
244 uint32_t u1Reserved0 : 1;
245 uint32_t u4Reserved0 : 4;
246 /** The segment register (X86_SREG_XXX). */
247 uint32_t iSegReg : 3;
248 /** The index register (X86_GREG_XXX). */
249 uint32_t iIdxReg : 4;
250 /** Set if index register is invalid. */
251 uint32_t fIdxRegValid : 1;
252 /** The base register (X86_GREG_XXX). */
253 uint32_t iBaseReg : 4;
254 /** Set if base register is invalid. */
255 uint32_t fBaseRegValid : 1;
256 /** Register 2 (X86_GREG_XXX). */
257 uint32_t iReg2 : 4;
258 } Inv;
259 } ExitInstrInfo;
260 /** Whether the VM-entry failed or not. */
261 bool fVMEntryFailed;
262 /** Alignment. */
263 uint8_t abAlignment1[3];
264
265 /** The VM-entry interruption-information field. */
266 uint32_t uEntryIntInfo;
267 /** The VM-entry exception error code field. */
268 uint32_t uEntryXcptErrorCode;
269 /** The VM-entry instruction length field. */
270 uint32_t cbEntryInstr;
271
272 /** IDT-vectoring information field. */
273 uint32_t uIdtVectoringInfo;
274 /** IDT-vectoring error code. */
275 uint32_t uIdtVectoringErrorCode;
276
277 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
278 uint32_t fVmcsFieldsRead;
279
280 /** Whether the guest debug state was active at the time of VM-exit. */
281 bool fWasGuestDebugStateActive;
282 /** Whether the hyper debug state was active at the time of VM-exit. */
283 bool fWasHyperDebugStateActive;
284 /** Whether TSC-offsetting should be setup before VM-entry. */
285 bool fUpdateTscOffsettingAndPreemptTimer;
286 /** Whether the VM-exit was caused by a page-fault during delivery of a
287 * contributory exception or another page-fault. */
288 bool fVectoringDoublePF;
289 /** Whether the VM-exit was caused by a page-fault during delivery of an
290 * external interrupt or NMI. */
291 bool fVectoringPF;
292} VMXTRANSIENT;
293AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
294AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
295AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
296AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
297AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
298/** Pointer to VMX transient state. */
299typedef VMXTRANSIENT *PVMXTRANSIENT;
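/*
 * Illustrative sketch (not from the original sources): once the VM-exit
 * instruction-information field has been read (see hmR0VmxReadExitInstrInfoVmcs below),
 * an I/O string exit handler can decode it straight off the union:
 *
 *     uint32_t const iSegReg   = pVmxTransient->ExitInstrInfo.StrIo.iSegReg;    // X86_SREG_XXX
 *     uint32_t const uAddrSize = pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize; // 0=16-bit, 1=32-bit, 2=64-bit
 */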
300
301
302/**
303 * MSR-bitmap read permissions.
304 */
305typedef enum VMXMSREXITREAD
306{
307 /** Reading this MSR causes a VM-exit. */
308 VMXMSREXIT_INTERCEPT_READ = 0xb,
309 /** Reading this MSR does not cause a VM-exit. */
310 VMXMSREXIT_PASSTHRU_READ
311} VMXMSREXITREAD;
312/** Pointer to MSR-bitmap read permissions. */
313typedef VMXMSREXITREAD* PVMXMSREXITREAD;
314
315/**
316 * MSR-bitmap write permissions.
317 */
318typedef enum VMXMSREXITWRITE
319{
320 /** Writing to this MSR causes a VM-exit. */
321 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
322 /** Writing to this MSR does not cause a VM-exit. */
323 VMXMSREXIT_PASSTHRU_WRITE
324} VMXMSREXITWRITE;
325/** Pointer to MSR-bitmap write permissions. */
326typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
327
328
329/**
330 * VMX VM-exit handler.
331 *
332 * @returns Strict VBox status code (i.e. informational status codes too).
333 * @param pVCpu The cross context virtual CPU structure.
334 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
335 * out-of-sync. Make sure to update the required
336 * fields before using them.
337 * @param pVmxTransient Pointer to the VMX-transient structure.
338 */
339#ifndef HMVMX_USE_FUNCTION_TABLE
340typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
341#else
342typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
343/** Pointer to VM-exit handler. */
344typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
345#endif
346
347/**
348 * VMX VM-exit handler, non-strict status code.
349 *
350 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
351 *
352 * @returns VBox status code, no informational status code returned.
353 * @param pVCpu The cross context virtual CPU structure.
354 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
355 * out-of-sync. Make sure to update the required
356 * fields before using them.
357 * @param pVmxTransient Pointer to the VMX-transient structure.
358 *
359 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
360 * use of that status code will be replaced with VINF_EM_SOMETHING
361 * later when switching over to IEM.
362 */
363#ifndef HMVMX_USE_FUNCTION_TABLE
364typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
365#else
366typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
367#endif
368
369
370/*********************************************************************************************************************************
371* Internal Functions *
372*********************************************************************************************************************************/
373static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush);
374static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr);
375static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
376static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat);
377static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
378 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState);
379#if HC_ARCH_BITS == 32
380static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
381#endif
382#ifndef HMVMX_USE_FUNCTION_TABLE
383DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
384# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
385# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
386#else
387# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
388# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
389#endif
390
391
392/** @name VM-exit handlers.
393 * @{
394 */
395static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
396static FNVMXEXITHANDLER hmR0VmxExitExtInt;
397static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
398static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal;
399static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi;
400static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi;
401static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi;
402static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
403static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
404static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
405static FNVMXEXITHANDLER hmR0VmxExitCpuid;
406static FNVMXEXITHANDLER hmR0VmxExitGetsec;
407static FNVMXEXITHANDLER hmR0VmxExitHlt;
408static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
409static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
410static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
411static FNVMXEXITHANDLER hmR0VmxExitVmcall;
412static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
413static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
414static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
415static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
416static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
417static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
418static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
419static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
420static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
421static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad;
422static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUndefined;
423static FNVMXEXITHANDLER hmR0VmxExitMwait;
424static FNVMXEXITHANDLER hmR0VmxExitMtf;
425static FNVMXEXITHANDLER hmR0VmxExitMonitor;
426static FNVMXEXITHANDLER hmR0VmxExitPause;
427static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck;
428static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
429static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
430static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
431static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
432static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
433static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
434static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
435static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
436static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
437static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
438static FNVMXEXITHANDLER hmR0VmxExitRdrand;
439static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
440/** @} */
441
442static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
443static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
444static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
445static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
446static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
447static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
448static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
449static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx);
450
451
452/*********************************************************************************************************************************
453* Global Variables *
454*********************************************************************************************************************************/
455#ifdef HMVMX_USE_FUNCTION_TABLE
456
457/**
458 * VMX_EXIT dispatch table.
459 */
460static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
461{
462 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
463 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
464 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
465 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
466 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
467 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
468 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
469 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
470 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
471 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
472 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
473 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
474 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
475 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
476 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
477 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
478 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
479 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
480 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
481 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
482 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
483 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
484 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
485 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
486 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
487 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
488 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
489 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
490 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
491 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
492 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
493 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
494 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
495 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
496 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
497 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
498 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
499 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
500 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
501 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
502 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
503 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
504 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
505 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
506 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
507 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
508 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
509 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
510 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
511 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
512 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
513 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
514 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
515 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
516 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
517 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
518 /* 56 VMX_EXIT_APIC_WRITE */ hmR0VmxExitErrUndefined,
519 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
520 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
521 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
522 /* 60 VMX_EXIT_ENCLS */ hmR0VmxExitErrUndefined,
523 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
524 /* 62 VMX_EXIT_PML_FULL */ hmR0VmxExitErrUndefined,
525 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
526 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
527};
528#endif /* HMVMX_USE_FUNCTION_TABLE */
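/*
 * Illustrative sketch (not from the original sources): with HMVMX_USE_FUNCTION_TABLE
 * defined, dispatching a VM-exit amounts to indexing this table with the basic exit
 * reason, roughly:
 *
 *     AssertMsg(pVmxTransient->uExitReason <= VMX_EXIT_MAX, ("%#x\n", pVmxTransient->uExitReason));
 *     VBOXSTRICTRC rcStrict = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
 *
 * Without the table, hmR0VmxHandleExit() switches on the exit reason instead.
 */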
529
530#ifdef VBOX_STRICT
531static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
532{
533 /* 0 */ "(Not Used)",
534 /* 1 */ "VMCALL executed in VMX root operation.",
535 /* 2 */ "VMCLEAR with invalid physical address.",
536 /* 3 */ "VMCLEAR with VMXON pointer.",
537 /* 4 */ "VMLAUNCH with non-clear VMCS.",
538 /* 5 */ "VMRESUME with non-launched VMCS.",
539 /* 6 */ "VMRESUME after VMXOFF",
540 /* 7 */ "VM-entry with invalid control fields.",
541 /* 8 */ "VM-entry with invalid host state fields.",
542 /* 9 */ "VMPTRLD with invalid physical address.",
543 /* 10 */ "VMPTRLD with VMXON pointer.",
544 /* 11 */ "VMPTRLD with incorrect revision identifier.",
545 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
546 /* 13 */ "VMWRITE to read-only VMCS component.",
547 /* 14 */ "(Not Used)",
548 /* 15 */ "VMXON executed in VMX root operation.",
549 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
550 /* 17 */ "VM-entry with non-launched executive VMCS.",
551 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
552 /* 19 */ "VMCALL with non-clear VMCS.",
553 /* 20 */ "VMCALL with invalid VM-exit control fields.",
554 /* 21 */ "(Not Used)",
555 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
556 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
557 /* 24 */ "VMCALL with invalid SMM-monitor features.",
558 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
559 /* 26 */ "VM-entry with events blocked by MOV SS.",
560 /* 27 */ "(Not Used)",
561 /* 28 */ "Invalid operand to INVEPT/INVVPID."
562};
563#endif /* VBOX_STRICT */
564
565
566
567/**
568 * Updates the VM's last error record.
569 *
570 * If there was a VMX instruction error, reads the error data from the VMCS and
571 * updates VCPU's last error record as well.
572 *
573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
574 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
575 * VERR_VMX_INVALID_VMCS_FIELD.
576 * @param rc The error code.
577 */
578static void hmR0VmxUpdateErrorRecord(PVMCPU pVCpu, int rc)
579{
580 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
581 || rc == VERR_VMX_UNABLE_TO_START_VM)
582 {
583 AssertPtrReturnVoid(pVCpu);
584 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
585 }
586 pVCpu->CTX_SUFF(pVM)->hm.s.rcInit = rc;
587}
588
589
590/**
591 * Reads the VM-entry interruption-information field from the VMCS into the VMX
592 * transient structure.
593 *
594 * @returns VBox status code.
595 * @param pVmxTransient Pointer to the VMX transient structure.
596 *
597 * @remarks No-long-jump zone!!!
598 */
599DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
600{
601 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
602 AssertRCReturn(rc, rc);
603 return VINF_SUCCESS;
604}
605
606#ifdef VBOX_STRICT
607/**
608 * Reads the VM-entry exception error code field from the VMCS into
609 * the VMX transient structure.
610 *
611 * @returns VBox status code.
612 * @param pVmxTransient Pointer to the VMX transient structure.
613 *
614 * @remarks No-long-jump zone!!!
615 */
616DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
617{
618 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
619 AssertRCReturn(rc, rc);
620 return VINF_SUCCESS;
621}
622
623
624/**
625 * Reads the VM-entry instruction length field from the VMCS into the VMX
626 * transient structure.
627 *
628 * @returns VBox status code.
629 * @param pVmxTransient Pointer to the VMX transient structure.
630 *
631 * @remarks No-long-jump zone!!!
632 */
633DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
634{
635 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
636 AssertRCReturn(rc, rc);
637 return VINF_SUCCESS;
638}
639#endif /* VBOX_STRICT */
640
641
642/**
643 * Reads the VM-exit interruption-information field from the VMCS into the VMX
644 * transient structure.
645 *
646 * @returns VBox status code.
647 * @param pVmxTransient Pointer to the VMX transient structure.
648 */
649DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
650{
651 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
652 {
653 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
654 AssertRCReturn(rc,rc);
655 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
656 }
657 return VINF_SUCCESS;
658}
659
660
661/**
662 * Reads the VM-exit interruption error code from the VMCS into the VMX
663 * transient structure.
664 *
665 * @returns VBox status code.
666 * @param pVmxTransient Pointer to the VMX transient structure.
667 */
668DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
669{
670 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
671 {
672 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
673 AssertRCReturn(rc, rc);
674 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
675 }
676 return VINF_SUCCESS;
677}
678
679
680/**
681 * Reads the VM-exit instruction length field from the VMCS into the VMX
682 * transient structure.
683 *
684 * @returns VBox status code.
685 * @param pVmxTransient Pointer to the VMX transient structure.
686 */
687DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
688{
689 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
690 {
691 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
692 AssertRCReturn(rc, rc);
693 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
694 }
695 return VINF_SUCCESS;
696}
697
698
699/**
700 * Reads the VM-exit instruction-information field from the VMCS into
701 * the VMX transient structure.
702 *
703 * @returns VBox status code.
704 * @param pVmxTransient Pointer to the VMX transient structure.
705 */
706DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
707{
708 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
709 {
710 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
711 AssertRCReturn(rc, rc);
712 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
713 }
714 return VINF_SUCCESS;
715}
716
717
718/**
719 * Reads the VM-exit qualification from the VMCS into the VMX transient
720 * structure.
721 *
722 * @returns VBox status code.
723 * @param pVCpu The cross context virtual CPU structure of the
724 * calling EMT. (Required for the VMCS cache case.)
725 * @param pVmxTransient Pointer to the VMX transient structure.
726 */
727DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
728{
729 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
730 {
731 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
732 AssertRCReturn(rc, rc);
733 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
734 }
735 return VINF_SUCCESS;
736}
737
738
739/**
740 * Reads the IDT-vectoring information field from the VMCS into the VMX
741 * transient structure.
742 *
743 * @returns VBox status code.
744 * @param pVmxTransient Pointer to the VMX transient structure.
745 *
746 * @remarks No-long-jump zone!!!
747 */
748DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
749{
750 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
751 {
752 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
753 AssertRCReturn(rc, rc);
754 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
755 }
756 return VINF_SUCCESS;
757}
758
759
760/**
761 * Reads the IDT-vectoring error code from the VMCS into the VMX
762 * transient structure.
763 *
764 * @returns VBox status code.
765 * @param pVmxTransient Pointer to the VMX transient structure.
766 */
767DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
768{
769 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
770 {
771 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
772 AssertRCReturn(rc, rc);
773 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
774 }
775 return VINF_SUCCESS;
776}
777
778
779/**
780 * Enters VMX root mode operation on the current CPU.
781 *
782 * @returns VBox status code.
783 * @param pVM The cross context VM structure. Can be
784 * NULL, after a resume.
785 * @param HCPhysCpuPage Physical address of the VMXON region.
786 * @param pvCpuPage Pointer to the VMXON region.
787 */
788static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
789{
790 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
791 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
792 Assert(pvCpuPage);
793 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
794
795 if (pVM)
796 {
797 /* Write the VMCS revision dword to the VMXON region. */
798 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
799 }
800
801 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
802 RTCCUINTREG fEFlags = ASMIntDisableFlags();
803
804 /* Enable the VMX bit in CR4 if necessary. */
805 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
806
807 /* Enter VMX root mode. */
808 int rc = VMXEnable(HCPhysCpuPage);
809 if (RT_FAILURE(rc))
810 {
811 if (!(uOldCr4 & X86_CR4_VMXE))
812 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
813
814 if (pVM)
815 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
816 }
817
818 /* Restore interrupts. */
819 ASMSetFlags(fEFlags);
820 return rc;
821}
822
823
824/**
825 * Exits VMX root mode operation on the current CPU.
826 *
827 * @returns VBox status code.
828 */
829static int hmR0VmxLeaveRootMode(void)
830{
831 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
832
833 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
834 RTCCUINTREG fEFlags = ASMIntDisableFlags();
835
836 /* If we're for some reason not in VMX root mode, then don't leave it. */
837 RTCCUINTREG uHostCR4 = ASMGetCR4();
838
839 int rc;
840 if (uHostCR4 & X86_CR4_VMXE)
841 {
842 /* Exit VMX root mode and clear the VMX bit in CR4. */
843 VMXDisable();
844 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
845 rc = VINF_SUCCESS;
846 }
847 else
848 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
849
850 /* Restore interrupts. */
851 ASMSetFlags(fEFlags);
852 return rc;
853}
854
855
856/**
857 * Allocates and maps one physically contiguous page. The allocated page is
858 * zeroed out. (Used by various VT-x structures).
859 *
860 * @returns IPRT status code.
861 * @param pMemObj Pointer to the ring-0 memory object.
862 * @param ppVirt Where to store the virtual address of the
863 * allocation.
864 * @param pHCPhys Where to store the physical address of the
865 * allocation.
866 */
867static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
868{
869 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
870 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
871 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
872
873 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
874 if (RT_FAILURE(rc))
875 return rc;
876 *ppVirt = RTR0MemObjAddress(*pMemObj);
877 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
878 ASMMemZero32(*ppVirt, PAGE_SIZE);
879 return VINF_SUCCESS;
880}
881
882
883/**
884 * Frees and unmaps an allocated physical page.
885 *
886 * @param pMemObj Pointer to the ring-0 memory object.
887 * @param ppVirt Where to re-initialize the virtual address of
888 * allocation as 0.
889 * @param pHCPhys Where to re-initialize the physical address of the
890 * allocation as 0.
891 */
892static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
893{
894 AssertPtr(pMemObj);
895 AssertPtr(ppVirt);
896 AssertPtr(pHCPhys);
897 if (*pMemObj != NIL_RTR0MEMOBJ)
898 {
899 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
900 AssertRC(rc);
901 *pMemObj = NIL_RTR0MEMOBJ;
902 *ppVirt = 0;
903 *pHCPhys = 0;
904 }
905}
906
907
908/**
909 * Worker function to free VT-x related structures.
910 *
911 * @returns IPRT status code.
912 * @param pVM The cross context VM structure.
913 */
914static void hmR0VmxStructsFree(PVM pVM)
915{
916 for (VMCPUID i = 0; i < pVM->cCpus; i++)
917 {
918 PVMCPU pVCpu = &pVM->aCpus[i];
919 AssertPtr(pVCpu);
920
921 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
922 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
923
924 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
925 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
926
927 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
928 }
929
930 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
931#ifdef VBOX_WITH_CRASHDUMP_MAGIC
932 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
933#endif
934}
935
936
937/**
938 * Worker function to allocate VT-x related VM structures.
939 *
940 * @returns IPRT status code.
941 * @param pVM The cross context VM structure.
942 */
943static int hmR0VmxStructsAlloc(PVM pVM)
944{
945 /*
946 * Initialize members up-front so we can cleanup properly on allocation failure.
947 */
948#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
949 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
950 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
951 pVM->hm.s.vmx.HCPhys##a_Name = 0;
952
953#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
954 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
955 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
956 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
957
958#ifdef VBOX_WITH_CRASHDUMP_MAGIC
959 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
960#endif
961 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
962
963 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
964 for (VMCPUID i = 0; i < pVM->cCpus; i++)
965 {
966 PVMCPU pVCpu = &pVM->aCpus[i];
967 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
968 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
969 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
970 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
971 }
972#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
973#undef VMXLOCAL_INIT_VM_MEMOBJ
974
975 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
976 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
977 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
978 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
979
980 /*
981 * Allocate all the VT-x structures.
982 */
983 int rc = VINF_SUCCESS;
984#ifdef VBOX_WITH_CRASHDUMP_MAGIC
985 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
986 if (RT_FAILURE(rc))
987 goto cleanup;
988 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
989 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
990#endif
991
992 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
993 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
994 {
995 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
996 &pVM->hm.s.vmx.HCPhysApicAccess);
997 if (RT_FAILURE(rc))
998 goto cleanup;
999 }
1000
1001 /*
1002 * Initialize per-VCPU VT-x structures.
1003 */
1004 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1005 {
1006 PVMCPU pVCpu = &pVM->aCpus[i];
1007 AssertPtr(pVCpu);
1008
1009 /* Allocate the VM control structure (VMCS). */
1010 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
1011 if (RT_FAILURE(rc))
1012 goto cleanup;
1013
1014 /* Get the allocated virtual-APIC page from the APIC device for transparent TPR accesses. */
1015 if ( PDMHasApic(pVM)
1016 && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW))
1017 {
1018 rc = APICGetApicPageForCpu(pVCpu, &pVCpu->hm.s.vmx.HCPhysVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
1019 NULL /* pR3Ptr */, NULL /* pRCPtr */);
1020 if (RT_FAILURE(rc))
1021 goto cleanup;
1022 }
1023
1024 /*
1025 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
1026 * transparent accesses of specific MSRs.
1027 *
1028 * If the condition for enabling MSR bitmaps changes here, don't forget to
1029 * update HMAreMsrBitmapsAvailable().
1030 */
1031 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1032 {
1033 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1034 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1035 if (RT_FAILURE(rc))
1036 goto cleanup;
1037 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1038 }
1039
1040 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1041 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1042 if (RT_FAILURE(rc))
1043 goto cleanup;
1044
1045 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1046 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1047 if (RT_FAILURE(rc))
1048 goto cleanup;
1049 }
1050
1051 return VINF_SUCCESS;
1052
1053cleanup:
1054 hmR0VmxStructsFree(pVM);
1055 return rc;
1056}
1057
1058
1059/**
1060 * Does global VT-x initialization (called during module initialization).
1061 *
1062 * @returns VBox status code.
1063 */
1064VMMR0DECL(int) VMXR0GlobalInit(void)
1065{
1066#ifdef HMVMX_USE_FUNCTION_TABLE
1067 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1068# ifdef VBOX_STRICT
1069 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1070 Assert(g_apfnVMExitHandlers[i]);
1071# endif
1072#endif
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Does global VT-x termination (called during module termination).
1079 */
1080VMMR0DECL(void) VMXR0GlobalTerm()
1081{
1082 /* Nothing to do currently. */
1083}
1084
1085
1086/**
1087 * Sets up and activates VT-x on the current CPU.
1088 *
1089 * @returns VBox status code.
1090 * @param pHostCpu Pointer to the global CPU info struct.
1091 * @param pVM The cross context VM structure. Can be
1092 * NULL after a host resume operation.
1093 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1094 * fEnabledByHost is @c true).
1095 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1096 * @a fEnabledByHost is @c true).
1097 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1098 * enable VT-x on the host.
1099 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1100 */
1101VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1102 void *pvMsrs)
1103{
1104 Assert(pHostCpu);
1105 Assert(pvMsrs);
1106 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1107
1108 /* Enable VT-x if it's not already enabled by the host. */
1109 if (!fEnabledByHost)
1110 {
1111 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1112 if (RT_FAILURE(rc))
1113 return rc;
1114 }
1115
1116 /*
1117 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been
1118 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
1119 * invalidated when flushing by VPID.
1120 */
1121 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1122 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1123 {
1124 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
1125 pHostCpu->fFlushAsidBeforeUse = false;
1126 }
1127 else
1128 pHostCpu->fFlushAsidBeforeUse = true;
1129
1130 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1131 ++pHostCpu->cTlbFlushes;
1132
1133 return VINF_SUCCESS;
1134}
1135
1136
1137/**
1138 * Deactivates VT-x on the current CPU.
1139 *
1140 * @returns VBox status code.
1141 * @param pHostCpu Pointer to the global CPU info struct.
1142 * @param pvCpuPage Pointer to the VMXON region.
1143 * @param HCPhysCpuPage Physical address of the VMXON region.
1144 *
1145 * @remarks This function should never be called when SUPR0EnableVTx() or
1146 * similar was used to enable VT-x on the host.
1147 */
1148VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1149{
1150 RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);
1151
1152 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1153 return hmR0VmxLeaveRootMode();
1154}
1155
1156
1157/**
1158 * Sets the permission bits for the specified MSR in the MSR bitmap.
1159 *
1160 * @param pVCpu The cross context virtual CPU structure.
1161 * @param uMsr The MSR value.
1162 * @param enmRead Whether reading this MSR causes a VM-exit.
1163 * @param enmWrite Whether writing this MSR causes a VM-exit.
1164 */
1165static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1166{
1167 int32_t iBit;
1168 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1169
1170 /*
1171 * Layout:
1172 * 0x000 - 0x3ff - Low MSR read bits
1173 * 0x400 - 0x7ff - High MSR read bits
1174 * 0x800 - 0xbff - Low MSR write bits
1175 * 0xc00 - 0xfff - High MSR write bits
1176 */
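 /*
  * Worked example (illustrative, not from the original sources): for a "high" MSR such as
  * MSR_K8_LSTAR (0xC0000082), iBit becomes 0x82 and pbMsrBitmap is advanced by 0x400, so
  * the read-permission bit lands in the 0x400-0x7ff range and the write-permission bit
  * (at pbMsrBitmap + 0x800) in the 0xc00-0xfff range.
  */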
1177 if (uMsr <= 0x00001FFF)
1178 iBit = uMsr;
1179 else if (uMsr - UINT32_C(0xC0000000) <= UINT32_C(0x00001FFF))
1180 {
1181 iBit = uMsr - UINT32_C(0xC0000000);
1182 pbMsrBitmap += 0x400;
1183 }
1184 else
1185 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1186
1187 Assert(iBit <= 0x1fff);
1188 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1189 ASMBitSet(pbMsrBitmap, iBit);
1190 else
1191 ASMBitClear(pbMsrBitmap, iBit);
1192
1193 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1194 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1195 else
1196 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1197}
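/*
 * Usage sketch (illustrative, not from the original sources): when an MSR is added to the
 * auto-load/store area below, the guest is granted direct access, and interception is
 * re-established when the MSR is removed again:
 *
 *     hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
 *     ...
 *     hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
 */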
1198
1199
1200#ifdef VBOX_STRICT
1201/**
1202 * Gets the permission bits for the specified MSR in the MSR bitmap.
1203 *
1204 * @returns VBox status code.
1205 * @retval VINF_SUCCESS if the specified MSR is found.
1206 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1207 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1208 *
1209 * @param pVCpu The cross context virtual CPU structure.
1210 * @param uMsr The MSR.
1211 * @param penmRead Where to store the read permissions.
1212 * @param penmWrite Where to store the write permissions.
1213 */
1214static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1215{
1216 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1217 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1218 int32_t iBit;
1219 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1220
1221 /* See hmR0VmxSetMsrPermission() for the layout. */
1222 if (uMsr <= 0x00001FFF)
1223 iBit = uMsr;
1224 else if ( uMsr >= 0xC0000000
1225 && uMsr <= 0xC0001FFF)
1226 {
1227 iBit = (uMsr - 0xC0000000);
1228 pbMsrBitmap += 0x400;
1229 }
1230 else
1231 AssertMsgFailedReturn(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr), VERR_NOT_SUPPORTED);
1232
1233 Assert(iBit <= 0x1fff);
1234 if (ASMBitTest(pbMsrBitmap, iBit))
1235 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1236 else
1237 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1238
1239 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1240 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1241 else
1242 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1243 return VINF_SUCCESS;
1244}
1245#endif /* VBOX_STRICT */
1246
1247
1248/**
1249 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1250 * area.
1251 *
1252 * @returns VBox status code.
1253 * @param pVCpu The cross context virtual CPU structure.
1254 * @param cMsrs The number of MSRs.
1255 */
1256static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1257{
1258 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1259 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1260 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1261 {
1262 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1263 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1264 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1265 }
1266
1267 /* Update number of guest MSRs to load/store across the world-switch. */
1268 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
1269 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
1270
1271 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1272 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
1273 AssertRCReturn(rc, rc);
1274
1275 /* Update the VCPU's copy of the MSR count. */
1276 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1277
1278 return VINF_SUCCESS;
1279}
1280
1281
1282/**
1283 * Adds a new (or updates the value of an existing) guest/host MSR
1284 * pair to be swapped during the world-switch as part of the
1285 * auto-load/store MSR area in the VMCS.
1286 *
1287 * @returns VBox status code.
1288 * @param pVCpu The cross context virtual CPU structure.
1289 * @param uMsr The MSR.
1290 * @param uGuestMsrValue Value of the guest MSR.
1291 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1292 * necessary.
1293 * @param pfAddedAndUpdated Where to store whether the MSR was added -and-
1294 * its value was updated. Optional, can be NULL.
1295 */
1296static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
1297 bool *pfAddedAndUpdated)
1298{
1299 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1300 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1301 uint32_t i;
1302 for (i = 0; i < cMsrs; i++)
1303 {
1304 if (pGuestMsr->u32Msr == uMsr)
1305 break;
1306 pGuestMsr++;
1307 }
1308
1309 bool fAdded = false;
1310 if (i == cMsrs)
1311 {
1312 ++cMsrs;
1313 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1314 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
1315
1316 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1317 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1318 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1319
1320 fAdded = true;
1321 }
1322
1323 /* Update the MSR values in the auto-load/store MSR area. */
1324 pGuestMsr->u32Msr = uMsr;
1325 pGuestMsr->u64Value = uGuestMsrValue;
1326
1327 /* Create/update the MSR slot in the host MSR area. */
1328 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1329 pHostMsr += i;
1330 pHostMsr->u32Msr = uMsr;
1331
1332 /*
1333 * Update the host MSR only when requested by the caller AND when we're
1334 * adding it to the auto-load/store area. Otherwise, it would have been
1335 * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons.
1336 */
1337 bool fUpdatedMsrValue = false;
1338 if ( fAdded
1339 && fUpdateHostMsr)
1340 {
1341 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1342 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1343 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1344 fUpdatedMsrValue = true;
1345 }
1346
1347 if (pfAddedAndUpdated)
1348 *pfAddedAndUpdated = fUpdatedMsrValue;
1349 return VINF_SUCCESS;
1350}
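/*
 * Illustrative usage (not from the original sources): swapping the guest TSC_AUX value
 * across the world-switch, assuming CPUMGetGuestTscAux() to fetch the guest value, might
 * look like:
 *
 *     // fUpdateHostMsr = true, pfAddedAndUpdated not needed here.
 *     int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true, NULL);
 *     AssertRCReturn(rc, rc);
 */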
1351
1352
1353/**
1354 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1355 * auto-load/store MSR area in the VMCS.
1356 *
1357 * @returns VBox status code.
1358 * @param pVCpu The cross context virtual CPU structure.
1359 * @param uMsr The MSR.
1360 */
1361static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1362{
1363 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1364 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1365 for (uint32_t i = 0; i < cMsrs; i++)
1366 {
1367 /* Find the MSR. */
1368 if (pGuestMsr->u32Msr == uMsr)
1369 {
1370 /* If it's the last MSR, simply reduce the count. */
1371 if (i == cMsrs - 1)
1372 {
1373 --cMsrs;
1374 break;
1375 }
1376
1377 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1378 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1379 pLastGuestMsr += cMsrs - 1;
1380 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1381 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1382
1383 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1384 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1385 pLastHostMsr += cMsrs - 1;
1386 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1387 pHostMsr->u64Value = pLastHostMsr->u64Value;
1388 --cMsrs;
1389 break;
1390 }
1391 pGuestMsr++;
1392 }
1393
1394 /* Update the VMCS if the count changed (meaning the MSR was found). */
1395 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1396 {
1397 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1398 AssertRCReturn(rc, rc);
1399
1400 /* We're no longer swapping MSRs during the world-switch, so intercept guest reads/writes to them. */
1401 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1402 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1403
1404 Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1405 return VINF_SUCCESS;
1406 }
1407
1408 return VERR_NOT_FOUND;
1409}
1410
1411
1412/**
1413 * Checks if the specified guest MSR is part of the auto-load/store area in
1414 * the VMCS.
1415 *
1416 * @returns true if found, false otherwise.
1417 * @param pVCpu The cross context virtual CPU structure.
1418 * @param uMsr The MSR to find.
1419 */
1420static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1421{
1422 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1423 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1424
1425 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1426 {
1427 if (pGuestMsr->u32Msr == uMsr)
1428 return true;
1429 }
1430 return false;
1431}
1432
1433
1434/**
1435 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1436 *
1437 * @param pVCpu The cross context virtual CPU structure.
1438 *
1439 * @remarks No-long-jump zone!!!
1440 */
1441static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1442{
1443 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1444 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1445 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1446 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1447
1448 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1449 {
1450 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1451
1452 /*
1453 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1454 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1455 */
1456 if (pHostMsr->u32Msr == MSR_K6_EFER)
1457 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1458 else
1459 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1460 }
1461
1462 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1463}
1464
1465
1466/**
1467 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1468 * perform lazy restoration of the host MSRs while leaving VT-x.
1469 *
1470 * @param pVCpu The cross context virtual CPU structure.
1471 *
1472 * @remarks No-long-jump zone!!!
1473 */
1474static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1475{
1476 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1477
1478 /*
1479 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1480 */
1481 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
1482 {
1483 Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. */
1484#if HC_ARCH_BITS == 64
1485 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1486 {
1487 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1488 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1489 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1490 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1491 }
1492#endif
1493 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1494 }
1495}
1496
1497
1498/**
1499 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1500 * lazily while leaving VT-x.
1501 *
1502 * @returns true if it does, false otherwise.
1503 * @param pVCpu The cross context virtual CPU structure.
1504 * @param uMsr The MSR to check.
1505 */
1506static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1507{
1508 NOREF(pVCpu);
1509#if HC_ARCH_BITS == 64
1510 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1511 {
1512 switch (uMsr)
1513 {
1514 case MSR_K8_LSTAR:
1515 case MSR_K6_STAR:
1516 case MSR_K8_SF_MASK:
1517 case MSR_K8_KERNEL_GS_BASE:
1518 return true;
1519 }
1520 }
1521#else
1522 RT_NOREF(pVCpu, uMsr);
1523#endif
1524 return false;
1525}
1526
1527
1528/**
1529 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1530 *
1531 * The name of this function is slightly confusing. This function does NOT
1532 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1533 * common prefix for functions dealing with "lazy restoration" of the shared
1534 * MSRs.
1535 *
1536 * @param pVCpu The cross context virtual CPU structure.
1537 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1538 * out-of-sync. Make sure to update the required fields
1539 * before using them.
1540 *
1541 * @remarks No-long-jump zone!!!
1542 */
1543static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1544{
1545 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1546 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1547
1548 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1549#if HC_ARCH_BITS == 64
1550 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1551 {
1552 /*
1553 * If the guest MSRs are not loaded -and- if all the guest MSRs are identical
1554 * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then
1555 * we can skip a few MSR writes.
1556 *
1557 * Otherwise, it implies either 1. they're not loaded, or 2. they're loaded but the
1558 * guest MSR values in the guest-CPU context might be different to what's currently
1559 * loaded in the CPU. In either case, we need to write the new guest MSR values to the
1560 * CPU, see @bugref{8728}.
1561 */
1562 if ( !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1563 && pMixedCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr
1564 && pMixedCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostLStarMsr
1565 && pMixedCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostStarMsr
1566 && pMixedCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostSFMaskMsr)
1567 {
1568#ifdef VBOX_STRICT
1569 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pMixedCtx->msrKERNELGSBASE);
1570 Assert(ASMRdMsr(MSR_K8_LSTAR) == pMixedCtx->msrLSTAR);
1571 Assert(ASMRdMsr(MSR_K6_STAR) == pMixedCtx->msrSTAR);
1572 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pMixedCtx->msrSFMASK);
1573#endif
1574 }
1575 else
1576 {
1577 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1578 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1579 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1580 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1581 }
1582 }
1583#else
1584 RT_NOREF(pMixedCtx);
1585#endif
1586 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1587}
1588
1589
1590/**
1591 * Performs lazy restoration of the set of host MSRs if they were previously
1592 * loaded with guest MSR values.
1593 *
1594 * @param pVCpu The cross context virtual CPU structure.
1595 *
1596 * @remarks No-long-jump zone!!!
1597 * @remarks The guest MSRs should have been saved back into the guest-CPU
1598 * context by hmR0VmxImportGuestState()!!!
1599 */
1600static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1601{
1602 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1603 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1604
1605 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1606 {
1607 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1608#if HC_ARCH_BITS == 64
1609 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1610 {
1611 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1612 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1613 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1614 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1615 }
1616#endif
1617 }
1618 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1619}
1620
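/*
 * Illustrative sketch, not part of the original source: the ordering implied
 * by the assertions in the lazy-MSR helpers above. The host values must be
 * saved (VMX_LAZY_MSRS_SAVED_HOST) before the guest values are loaded
 * (VMX_LAZY_MSRS_LOADED_GUEST), and hmR0VmxLazyRestoreHostMsrs() clears both
 * flags when leaving VT-x. The wrapper name is hypothetical, the real callers
 * and error handling are omitted, and the block is kept inactive.
 */
#if 0
static void hmR0VmxExampleLazyMsrSequence(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    hmR0VmxLazySaveHostMsrs(pVCpu);              /* Sets VMX_LAZY_MSRS_SAVED_HOST. */
    hmR0VmxLazyLoadGuestMsrs(pVCpu, pMixedCtx);  /* Sets VMX_LAZY_MSRS_LOADED_GUEST. */
    /* ... execute guest code and import the guest MSRs back into pMixedCtx ... */
    hmR0VmxLazyRestoreHostMsrs(pVCpu);           /* Restores the host values, clears both flags. */
}
#endif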
1621
1622/**
1623 * Verifies that our cached values of the VMCS fields are all consistent with
1624 * what's actually present in the VMCS.
1625 *
1626 * @returns VBox status code.
1627 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1628 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1629 * VMCS content. HMCPU error-field is
1630 * updated, see VMX_VCI_XXX.
1631 * @param pVCpu The cross context virtual CPU structure.
1632 */
1633static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1634{
1635 uint32_t u32Val;
1636 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1637 AssertRCReturn(rc, rc);
1638 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32EntryCtls == u32Val,
1639 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1640 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
1641 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1642
1643 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1644 AssertRCReturn(rc, rc);
1645 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ExitCtls == u32Val,
1646 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1647 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
1648 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1649
1650 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1651 AssertRCReturn(rc, rc);
1652 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32PinCtls == u32Val,
1653 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1654 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1655 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1656
1657 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1658 AssertRCReturn(rc, rc);
1659 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ProcCtls == u32Val,
1660 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1661 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1662 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1663
1664 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1665 {
1666 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1667 AssertRCReturn(rc, rc);
1668 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val,
1669 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1670 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1671 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1672 }
1673
1674 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1675 AssertRCReturn(rc, rc);
1676 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32XcptBitmap == u32Val,
1677 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32XcptBitmap, u32Val),
1678 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1679 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1680
1681 uint64_t u64Val;
1682 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1683 AssertRCReturn(rc, rc);
1684 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u64TscOffset == u64Val,
1685 ("Cache=%#RX64 VMCS=%#RX64\n", pVCpu->hm.s.vmx.u64TscOffset, u64Val),
1686 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1687 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1688
1689 return VINF_SUCCESS;
1690}
1691
1692
1693#ifdef VBOX_STRICT
1694/**
1695 * Verifies that our cached host EFER value has not changed
1696 * since we cached it.
1697 *
1698 * @param pVCpu The cross context virtual CPU structure.
1699 */
1700static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1701{
1702 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1703
1704 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1705 {
1706 uint64_t u64Val;
1707 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &u64Val);
1708 AssertRC(rc);
1709
1710 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1711 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1712 }
1713}
1714
1715
1716/**
1717 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1718 * VMCS are correct.
1719 *
1720 * @param pVCpu The cross context virtual CPU structure.
1721 */
1722static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1723{
1724 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1725
1726    /* Verify MSR counts in the VMCS are what we think they should be. */
1727 uint32_t cMsrs;
1728 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1729 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1730
1731 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1732 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1733
1734 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1735 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1736
1737 PCVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1738 PCVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1739 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1740 {
1741 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1742 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1743 pGuestMsr->u32Msr, cMsrs));
1744
1745 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1746 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1747 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1748
1749 /* Verify that the permissions are as expected in the MSR bitmap. */
1750 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1751 {
1752 VMXMSREXITREAD enmRead;
1753 VMXMSREXITWRITE enmWrite;
1754 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1755            AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1756 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1757 {
1758 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1759 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1760 }
1761 else
1762 {
1763 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1764 pGuestMsr->u32Msr, cMsrs));
1765 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1766 pGuestMsr->u32Msr, cMsrs));
1767 }
1768 }
1769 }
1770}
1771#endif /* VBOX_STRICT */
1772
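/*
 * Illustrative sketch, not part of the original source: how a read-intercept
 * bit is located in the 4 KB MSR bitmap according to the architectural layout
 * (read bitmaps for the low and high MSR ranges in the first 2 KB, write
 * bitmaps in the second 2 KB). This only mirrors the Intel-defined layout; it
 * is not necessarily how hmR0VmxSetMsrPermission()/hmR0VmxGetMsrPermission()
 * are implemented. The helper name is hypothetical and the block is inactive.
 */
#if 0
static bool hmR0VmxExampleIsMsrReadIntercepted(uint8_t const *pbMsrBitmap, uint32_t uMsr)
{
    uint32_t offBitmap;
    if (uMsr <= UINT32_C(0x00001fff))
        offBitmap = 0x0;                        /* Read bitmap for low MSRs (bytes 0-1023). */
    else if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBitmap = 0x400;                      /* Read bitmap for high MSRs (bytes 1024-2047). */
        uMsr     &= UINT32_C(0x1fff);
    }
    else
        return true;                            /* MSRs outside both ranges always cause VM-exits. */
    return RT_BOOL(pbMsrBitmap[offBitmap + (uMsr >> 3)] & RT_BIT(uMsr & 7));
}
#endif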
1773
1774/**
1775 * Flushes the TLB using EPT.
1776 *
1778 * @param pVCpu The cross context virtual CPU structure of the calling
1779 * EMT. Can be NULL depending on @a enmTlbFlush.
1780 * @param enmTlbFlush Type of flush.
1781 *
1782 * @remarks Caller is responsible for making sure this function is called only
1783 * when NestedPaging is supported and providing @a enmTlbFlush that is
1784 * supported by the CPU.
1785 * @remarks Can be called with interrupts disabled.
1786 */
1787static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush)
1788{
1789 uint64_t au64Descriptor[2];
1790 if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1791 au64Descriptor[0] = 0;
1792 else
1793 {
1794 Assert(pVCpu);
1795 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1796 }
1797 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1798
1799 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
1800 AssertMsg(rc == VINF_SUCCESS,
1801 ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0, rc));
1802
1803 if ( RT_SUCCESS(rc)
1804 && pVCpu)
1805 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1806}
1807
1808
1809/**
1810 * Flushes the TLB using VPID.
1811 *
1813 * @param pVCpu The cross context virtual CPU structure of the calling
1814 * EMT. Can be NULL depending on @a enmTlbFlush.
1815 * @param enmTlbFlush Type of flush.
1816 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1817 * on @a enmTlbFlush).
1818 *
1819 * @remarks Can be called with interrupts disabled.
1820 */
1821static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr)
1822{
1823 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid);
1824
1825 uint64_t au64Descriptor[2];
1826 if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1827 {
1828 au64Descriptor[0] = 0;
1829 au64Descriptor[1] = 0;
1830 }
1831 else
1832 {
1833 AssertPtr(pVCpu);
1834 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1835 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1836 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1837 au64Descriptor[1] = GCPtr;
1838 }
1839
1840 int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
1841 AssertMsg(rc == VINF_SUCCESS,
1842 ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1843
1844 if ( RT_SUCCESS(rc)
1845 && pVCpu)
1846 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1847 NOREF(rc);
1848}
1849
1850
1851/**
1852 * Invalidates a guest page by guest virtual address. Only relevant for
1853 * EPT/VPID, otherwise there is nothing really to invalidate.
1854 *
1855 * @returns VBox status code.
1856 * @param pVCpu The cross context virtual CPU structure.
1857 * @param GCVirt Guest virtual address of the page to invalidate.
1858 */
1859VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
1860{
1861 AssertPtr(pVCpu);
1862 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
1863
1864 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1865 if (!fFlushPending)
1866 {
1867 /*
1868         * We must invalidate the guest TLB entry in either case; we cannot ignore it even for
1869 * the EPT case. See @bugref{6043} and @bugref{6177}.
1870 *
1871 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
1872         * as this function may be called in a loop with individual addresses.
1873 */
1874 PVM pVM = pVCpu->CTX_SUFF(pVM);
1875 if (pVM->hm.s.vmx.fVpid)
1876 {
1877 bool fVpidFlush = RT_BOOL(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1878
1879#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
1880 /*
1881             * Work around errata BV75, AAJ159 and others that affect several Intel CPUs
1882 * where executing INVVPID outside 64-bit mode does not flush translations of
1883 * 64-bit linear addresses, see @bugref{6208#c72}.
1884 */
1885 if (RT_HI_U32(GCVirt))
1886 fVpidFlush = false;
1887#endif
1888
1889 if (fVpidFlush)
1890 {
1891 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_INDIV_ADDR, GCVirt);
1892 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1893 }
1894 else
1895 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1896 }
1897 else if (pVM->hm.s.fNestedPaging)
1898 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1899 }
1900
1901 return VINF_SUCCESS;
1902}
1903
1904
1905/**
1906 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1907 * case where neither EPT nor VPID is supported by the CPU.
1908 *
1909 * @param pVCpu The cross context virtual CPU structure.
1910 * @param pCpu Pointer to the global HM struct.
1911 *
1912 * @remarks Called with interrupts disabled.
1913 */
1914static void hmR0VmxFlushTaggedTlbNone(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1915{
1916 AssertPtr(pVCpu);
1917 AssertPtr(pCpu);
1918
1919 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1920
1921 Assert(pCpu->idCpu != NIL_RTCPUID);
1922 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1923 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1924 pVCpu->hm.s.fForceTLBFlush = false;
1925 return;
1926}
1927
1928
1929/**
1930 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1931 *
1932 * @param pVCpu The cross context virtual CPU structure.
1933 * @param pCpu Pointer to the global HM CPU struct.
1934 *
1935 * @remarks All references to "ASID" in this function pertain to "VPID" in Intel's
1936 *          nomenclature. The reason is to avoid confusion in compare statements
1937 * since the host-CPU copies are named "ASID".
1938 *
1939 * @remarks Called with interrupts disabled.
1940 */
1941static void hmR0VmxFlushTaggedTlbBoth(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1942{
1943#ifdef VBOX_WITH_STATISTICS
1944 bool fTlbFlushed = false;
1945# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1946# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1947 if (!fTlbFlushed) \
1948 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1949 } while (0)
1950#else
1951# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1952# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1953#endif
1954
1955 AssertPtr(pCpu);
1956 AssertPtr(pVCpu);
1957 Assert(pCpu->idCpu != NIL_RTCPUID);
1958
1959 PVM pVM = pVCpu->CTX_SUFF(pVM);
1960 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1961 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1962               " fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1963
1964 /*
1965 * Force a TLB flush for the first world-switch if the current CPU differs from the one we
1966 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
1967 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
1968 * cannot reuse the current ASID anymore.
1969 */
1970 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1971 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1972 {
1973 ++pCpu->uCurrentAsid;
1974 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1975 {
1976 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1977 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1978 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1979 }
1980
1981 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1982 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1983 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1984
1985 /*
1986 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1987 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1988 */
1989 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
1990 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1991 HMVMX_SET_TAGGED_TLB_FLUSHED();
1992 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1993 }
1994
1995 /* Check for explicit TLB flushes. */
1996 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1997 {
1998 /*
1999         * Changes to the EPT paging structure by the VMM require flushing-by-EPT as the CPU
2000         * creates guest-physical (i.e. only EPT-tagged) mappings while traversing the EPT
2001 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
2002 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
2003 * mappings, see @bugref{6568}.
2004 *
2005 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
2006 */
2007 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
2008 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2009 HMVMX_SET_TAGGED_TLB_FLUSHED();
2010 }
2011
2012 pVCpu->hm.s.fForceTLBFlush = false;
2013 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
2014
2015 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
2016 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
2017 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2018 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2019 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2020 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2021 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2022 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2023 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2024
2025 /* Update VMCS with the VPID. */
2026 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2027 AssertRC(rc);
2028
2029#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2030}
2031
2032
2033/**
2034 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2035 *
2037 * @param pVCpu The cross context virtual CPU structure.
2038 * @param pCpu Pointer to the global HM CPU struct.
2039 *
2040 * @remarks Called with interrupts disabled.
2041 */
2042static void hmR0VmxFlushTaggedTlbEpt(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2043{
2044 AssertPtr(pVCpu);
2045 AssertPtr(pCpu);
2046 Assert(pCpu->idCpu != NIL_RTCPUID);
2047 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
2048 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
2049
2050 /*
2051 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2052 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2053 */
2054 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2055 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2056 {
2057 pVCpu->hm.s.fForceTLBFlush = true;
2058 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2059 }
2060
2061 /* Check for explicit TLB flushes. */
2062 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2063 {
2064 pVCpu->hm.s.fForceTLBFlush = true;
2065 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2066 }
2067
2068 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2069 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2070
2071 if (pVCpu->hm.s.fForceTLBFlush)
2072 {
2073 hmR0VmxFlushEpt(pVCpu, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
2074 pVCpu->hm.s.fForceTLBFlush = false;
2075 }
2076}
2077
2078
2079/**
2080 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2081 *
2083 * @param pVCpu The cross context virtual CPU structure.
2084 * @param pCpu Pointer to the global HM CPU struct.
2085 *
2086 * @remarks Called with interrupts disabled.
2087 */
2088static void hmR0VmxFlushTaggedTlbVpid(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2089{
2090 AssertPtr(pVCpu);
2091 AssertPtr(pCpu);
2092 Assert(pCpu->idCpu != NIL_RTCPUID);
2093    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked without VPID."));
2094    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging."));
2095
2096 /*
2097 * Force a TLB flush for the first world switch if the current CPU differs from the one we
2098 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
2099 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
2100 * cannot reuse the current ASID anymore.
2101 */
2102 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2103 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2104 {
2105 pVCpu->hm.s.fForceTLBFlush = true;
2106 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2107 }
2108
2109 /* Check for explicit TLB flushes. */
2110 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2111 {
2112 /*
2113 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
2114 * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
2115 * fExplicitFlush = true here and change the pCpu->fFlushAsidBeforeUse check below to
2116 * include fExplicitFlush's too) - an obscure corner case.
2117 */
2118 pVCpu->hm.s.fForceTLBFlush = true;
2119 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2120 }
2121
2122 PVM pVM = pVCpu->CTX_SUFF(pVM);
2123 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2124 if (pVCpu->hm.s.fForceTLBFlush)
2125 {
2126 ++pCpu->uCurrentAsid;
2127 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2128 {
2129 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2130 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2131 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2132 }
2133
2134 pVCpu->hm.s.fForceTLBFlush = false;
2135 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2136 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2137 if (pCpu->fFlushAsidBeforeUse)
2138 {
2139 if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
2140 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2141 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2142 {
2143 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2144 pCpu->fFlushAsidBeforeUse = false;
2145 }
2146 else
2147 {
2148 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2149 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2150 }
2151 }
2152 }
2153
2154 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2155 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2156 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2157 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2158 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2159 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2160 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2161
2162 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2163 AssertRC(rc);
2164}
2165
2166
2167/**
2168 * Flushes the guest TLB entry based on CPU capabilities.
2169 *
2170 * @param pVCpu The cross context virtual CPU structure.
2171 * @param pCpu Pointer to the global HM CPU struct.
2172 */
2173DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2174{
2175#ifdef HMVMX_ALWAYS_FLUSH_TLB
2176 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2177#endif
2178 PVM pVM = pVCpu->CTX_SUFF(pVM);
2179 switch (pVM->hm.s.vmx.enmTlbFlushType)
2180 {
2181 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVCpu, pCpu); break;
2182 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pVCpu, pCpu); break;
2183 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pVCpu, pCpu); break;
2184 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pVCpu, pCpu); break;
2185 default:
2186 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2187 break;
2188 }
2189 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2190}
2191
2192
2193/**
2194 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2195 * TLB entries from the host TLB before VM-entry.
2196 *
2197 * @returns VBox status code.
2198 * @param pVM The cross context VM structure.
2199 */
2200static int hmR0VmxSetupTaggedTlb(PVM pVM)
2201{
2202 /*
2203 * Determine optimal flush type for Nested Paging.
2204     * We cannot ignore EPT if no suitable flush-type is supported by the CPU as we've already set up unrestricted
2205 * guest execution (see hmR3InitFinalizeR0()).
2206 */
2207 if (pVM->hm.s.fNestedPaging)
2208 {
2209 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2210 {
2211 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2212 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT;
2213 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2214 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS;
2215 else
2216 {
2217 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
2218 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2219 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
2220 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2221 }
2222
2223 /* Make sure the write-back cacheable memory type for EPT is supported. */
2224 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)))
2225 {
2226 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2227 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
2228 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2229 }
2230
2231 /* EPT requires a page-walk length of 4. */
2232 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
2233 {
2234 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2235 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
2236 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2237 }
2238 }
2239 else
2240 {
2241 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2242 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2243 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
2244 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2245 }
2246 }
2247
2248 /*
2249 * Determine optimal flush type for VPID.
2250 */
2251 if (pVM->hm.s.vmx.fVpid)
2252 {
2253 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2254 {
2255 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2256 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT;
2257 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2258 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS;
2259 else
2260 {
2261 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
2262 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2263 LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
2264 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2265 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2266 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2267 pVM->hm.s.vmx.fVpid = false;
2268 }
2269 }
2270 else
2271 {
2272 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2273            Log4Func(("VPID supported without INVVPID support. Ignoring VPID.\n"));
2274 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2275 pVM->hm.s.vmx.fVpid = false;
2276 }
2277 }
2278
2279 /*
2280 * Setup the handler for flushing tagged-TLBs.
2281 */
2282 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2283 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID;
2284 else if (pVM->hm.s.fNestedPaging)
2285 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT;
2286 else if (pVM->hm.s.vmx.fVpid)
2287 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID;
2288 else
2289 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE;
2290 return VINF_SUCCESS;
2291}
2292
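/*
 * Illustrative sketch, not part of the original source: the net effect of
 * hmR0VmxSetupTaggedTlb() on the flush-handler selection, assuming the
 * EPT/VPID capability checks above all pass. The helper name is hypothetical
 * and the block is kept inactive; see hmR0VmxFlushTaggedTlb() for the actual
 * dispatch.
 */
#if 0
static VMXTLBFLUSHTYPE hmR0VmxExampleSelectTlbFlushType(bool fNestedPaging, bool fVpid)
{
    if (fNestedPaging && fVpid)
        return VMXTLBFLUSHTYPE_EPT_VPID;    /* hmR0VmxFlushTaggedTlbBoth */
    if (fNestedPaging)
        return VMXTLBFLUSHTYPE_EPT;         /* hmR0VmxFlushTaggedTlbEpt */
    if (fVpid)
        return VMXTLBFLUSHTYPE_VPID;        /* hmR0VmxFlushTaggedTlbVpid */
    return VMXTLBFLUSHTYPE_NONE;            /* hmR0VmxFlushTaggedTlbNone */
}
#endif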
2293
2294/**
2295 * Sets up pin-based VM-execution controls in the VMCS.
2296 *
2297 * @returns VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure.
2299 *
2300 * @remarks We don't really care about optimizing vmwrites here as it's done only
2301 * once per VM and hence we don't care about VMCS-field cache comparisons.
2302 */
2303static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
2304{
2305 PVM pVM = pVCpu->CTX_SUFF(pVM);
2306 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2307 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2308
2309 fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2310 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2311
2312 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2313 fVal |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2314
2315 /* Enable the VMX preemption timer. */
2316 if (pVM->hm.s.vmx.fUsePreemptTimer)
2317 {
2318 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2319 fVal |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2320 }
2321
2322#if 0
2323 /* Enable posted-interrupt processing. */
2324 if (pVM->hm.s.fPostedIntrs)
2325 {
2326 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
2327 Assert(pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
2328 fVal |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;
2329 }
2330#endif
2331
2332 if ((fVal & fZap) != fVal)
2333 {
2334 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
2335 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
2336 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2337 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2338 }
2339
2340 /* Commit it to the VMCS and update our cache. */
2341 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
2342 AssertRCReturn(rc, rc);
2343 pVCpu->hm.s.vmx.u32PinCtls = fVal;
2344
2345 return VINF_SUCCESS;
2346}
2347
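/*
 * Illustrative sketch, not part of the original source: the pattern shared by
 * hmR0VmxSetupPinCtls() above and the processor-control setup routines below.
 * Bits set in 'disallowed0' must always be set, bits clear in 'allowed1' must
 * stay clear, and the "(fVal & fZap) != fVal" test catches a requested feature
 * the CPU does not allow. The helper is hypothetical and kept inactive.
 */
#if 0
static int hmR0VmxExampleBuildCtls(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fDesired, uint32_t *pfVal)
{
    uint32_t const fVal = fDisallowed0 | fDesired;  /* Must-be-one bits plus the requested features. */
    uint32_t const fZap = fAllowed1;                /* Bits cleared here must be cleared in the VMCS. */
    if ((fVal & fZap) != fVal)                      /* A requested bit the CPU does not allow? */
        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    *pfVal = fVal;
    return VINF_SUCCESS;
}
#endif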
2348
2349/**
2350 * Sets up secondary processor-based VM-execution controls in the VMCS.
2351 *
2352 * @returns VBox status code.
2353 * @param pVCpu The cross context virtual CPU structure.
2354 *
2355 * @remarks We don't really care about optimizing vmwrites here as it's done only
2356 * once per VM and hence we don't care about VMCS-field cache comparisons.
2357 */
2358static int hmR0VmxSetupProcCtls2(PVMCPU pVCpu)
2359{
2360 PVM pVM = pVCpu->CTX_SUFF(pVM);
2361 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2362 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2363
2364 /* WBINVD causes a VM-exit. */
2365 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2366 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
2367
2368 /* Enable EPT (aka nested-paging). */
2369 if (pVM->hm.s.fNestedPaging)
2370 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
2371
2372 /*
2373 * Enable the INVPCID instruction if supported by the hardware and we expose
2374 * it to the guest. Without this, guest executing INVPCID would cause a #UD.
2375 */
2376 if ( (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2377 && pVM->cpum.ro.GuestFeatures.fInvpcid)
2378 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2379
2380 /* Enable VPID. */
2381 if (pVM->hm.s.vmx.fVpid)
2382 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
2383
2384 /* Enable Unrestricted guest execution. */
2385 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2386 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;
2387
2388#if 0
2389 if (pVM->hm.s.fVirtApicRegs)
2390 {
2391 /* Enable APIC-register virtualization. */
2392 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
2393 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;
2394
2395 /* Enable virtual-interrupt delivery. */
2396 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
2397 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;
2398 }
2399#endif
2400
2401 /* Enable Virtual-APIC page accesses if supported by the CPU. This is where the TPR shadow resides. */
2402 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2403 * done dynamically. */
2404 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2405 {
2406 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2407 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2408 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2409 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2410 AssertRCReturn(rc, rc);
2411 }
2412
2413 /* Enable RDTSCP. */
2414 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2415 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
2416
2417 /* Enable Pause-Loop exiting. */
2418 if ( pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
2419 && pVM->hm.s.vmx.cPleGapTicks
2420 && pVM->hm.s.vmx.cPleWindowTicks)
2421 {
2422 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;
2423
2424 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
2425 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
2426 AssertRCReturn(rc, rc);
2427 }
2428
2429 if ((fVal & fZap) != fVal)
2430 {
2431 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
2432 pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
2433 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2434 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2435 }
2436
2437 /* Commit it to the VMCS and update our cache. */
2438 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
2439 AssertRCReturn(rc, rc);
2440 pVCpu->hm.s.vmx.u32ProcCtls2 = fVal;
2441
2442 return VINF_SUCCESS;
2443}
2444
2445
2446/**
2447 * Sets up processor-based VM-execution controls in the VMCS.
2448 *
2449 * @returns VBox status code.
2450 * @param pVCpu The cross context virtual CPU structure.
2451 *
2452 * @remarks We don't really care about optimizing vmwrites here as it's done only
2453 * once per VM and hence we don't care about VMCS-field cache comparisons.
2454 */
2455static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
2456{
2457 PVM pVM = pVCpu->CTX_SUFF(pVM);
2458 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2459 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2460
2461 fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2462 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2463 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2464 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2465 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2466 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2467 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2468
2469    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, so check that it is not -always- required to be set or cleared. */
2470 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2471 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2472 {
2473 LogRelFunc(("Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2474 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2475 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2476 }
2477
2478 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2479 if (!pVM->hm.s.fNestedPaging)
2480 {
2481 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2482 fVal |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2483 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2484 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2485 }
2486
2487 /* Use TPR shadowing if supported by the CPU. */
2488 if ( PDMHasApic(pVM)
2489 && pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2490 {
2491 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2492 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2493 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2494 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2495 AssertRCReturn(rc, rc);
2496
2497 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2498 /* CR8 writes cause a VM-exit based on TPR threshold. */
2499 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2500 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2501 }
2502 else
2503 {
2504 /*
2505 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2506 * Set this control only for 64-bit guests.
2507 */
2508 if (pVM->hm.s.fAllow64BitGuests)
2509 {
2510 fVal |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2511 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2512 }
2513 }
2514
2515 /* Use MSR-bitmaps if supported by the CPU. */
2516 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2517 {
2518 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2519
2520 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2521 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2522 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2523 AssertRCReturn(rc, rc);
2524
2525 /*
2526 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2527 * automatically using dedicated fields in the VMCS.
2528 */
2529 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2530 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2531 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2532 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2533 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2534#if HC_ARCH_BITS == 64
2535 /*
2536 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2537 */
2538 if (pVM->hm.s.fAllow64BitGuests)
2539 {
2540 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2541 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2542 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2543 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2544 }
2545#endif
2546 /*
2547 * The IA32_PRED_CMD MSR is write-only and has no state associated with it. We never need to intercept
2548         * access (writes need to be executed without exiting, reads will #GP-fault anyway).
2549 */
2550 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2551 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_PRED_CMD, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2552
2553        /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want to intercept reads/writes to it for now. */
2554 }
2555
2556 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2557 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2558 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2559
2560 if ((fVal & fZap) != fVal)
2561 {
2562 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
2563 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
2564 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2565 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2566 }
2567
2568 /* Commit it to the VMCS and update our cache. */
2569 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
2570 AssertRCReturn(rc, rc);
2571 pVCpu->hm.s.vmx.u32ProcCtls = fVal;
2572
2573 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
2574 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2575 return hmR0VmxSetupProcCtls2(pVCpu);
2576
2577 /* Sanity check, should not really happen. */
2578 if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2579 {
2580 LogRelFunc(("Unrestricted Guest enabled when secondary processor-based VM-execution controls not available\n"));
2581 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2582 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2583 }
2584
2585 /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
2586 return VINF_SUCCESS;
2587}
2588
2589
2590/**
2591 * Sets up miscellaneous (everything other than Pin & Processor-based
2592 * VM-execution) control fields in the VMCS.
2593 *
2594 * @returns VBox status code.
2595 * @param pVCpu The cross context virtual CPU structure.
2596 */
2597static int hmR0VmxSetupMiscCtls(PVMCPU pVCpu)
2598{
2599 AssertPtr(pVCpu);
2600
2601 int rc = VERR_GENERAL_FAILURE;
2602
2603 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2604#if 0
2605 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/
2606 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
2607 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
2608
2609 /*
2610 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
2611 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
2612 * We thus use the exception bitmap to control it rather than use both.
2613 */
2614 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
2615 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
2616
2617 /* All IO & IOIO instructions cause VM-exits. */
2618 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
2619 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
2620
2621 /* Initialize the MSR-bitmap area. */
2622 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
2623 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
2624 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
2625 AssertRCReturn(rc, rc);
2626#endif
2627
2628 /* Setup MSR auto-load/store area. */
2629 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2630 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2631 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2632 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2633 AssertRCReturn(rc, rc);
2634
2635 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2636 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2637 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2638 AssertRCReturn(rc, rc);
2639
2640 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2641 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2642 AssertRCReturn(rc, rc);
2643
2644 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2645#if 0
2646 /* Setup debug controls */
2647 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);
2648 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2649 AssertRCReturn(rc, rc);
2650#endif
2651
2652 return rc;
2653}
2654
2655
2656/**
2657 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2658 *
2659 * We shall setup those exception intercepts that don't change during the
2660 * lifetime of the VM here. The rest are done dynamically while loading the
2661 * guest state.
2662 *
2663 * @returns VBox status code.
2664 * @param pVCpu The cross context virtual CPU structure.
2665 */
2666static int hmR0VmxInitXcptBitmap(PVMCPU pVCpu)
2667{
2668 AssertPtr(pVCpu);
2669
2670 uint32_t uXcptBitmap;
2671
2672 /* Must always intercept #AC to prevent the guest from hanging the CPU. */
2673 uXcptBitmap = RT_BIT_32(X86_XCPT_AC);
2674
2675 /* Because we need to maintain the DR6 state even when intercepting DRx reads
2676       and writes, and because recursive #DBs can cause the CPU to hang, we must always
2677 intercept #DB. */
2678 uXcptBitmap |= RT_BIT_32(X86_XCPT_DB);
2679
2680 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2681 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
2682 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2683
2684 /* Commit it to the VMCS. */
2685 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2686 AssertRCReturn(rc, rc);
2687
2688 /* Update our cache of the exception bitmap. */
2689 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
2690 return VINF_SUCCESS;
2691}
2692
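/*
 * Illustrative sketch, not part of the original source: adding a dynamic
 * exception intercept on top of the static bitmap set up above. Bit N of the
 * exception bitmap makes vector N cause a VM-exit; the cached copy must be
 * kept in sync for hmR0VmxCheckVmcsCtls(). The helper name is hypothetical
 * and the block is kept inactive.
 */
#if 0
static int hmR0VmxExampleInterceptXcptGP(PVMCPU pVCpu)
{
    uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
    uXcptBitmap |= RT_BIT_32(X86_XCPT_GP);                          /* Intercept #GP in addition to the static set. */
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
    AssertRCReturn(rc, rc);
    pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;                    /* Keep the cache consistent with the VMCS. */
    return VINF_SUCCESS;
}
#endif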
2693
2694/**
2695 * Does per-VM VT-x initialization.
2696 *
2697 * @returns VBox status code.
2698 * @param pVM The cross context VM structure.
2699 */
2700VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2701{
2702 LogFlowFunc(("pVM=%p\n", pVM));
2703
2704 int rc = hmR0VmxStructsAlloc(pVM);
2705 if (RT_FAILURE(rc))
2706 {
2707 LogRelFunc(("hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2708 return rc;
2709 }
2710
2711 return VINF_SUCCESS;
2712}
2713
2714
2715/**
2716 * Does per-VM VT-x termination.
2717 *
2718 * @returns VBox status code.
2719 * @param pVM The cross context VM structure.
2720 */
2721VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2722{
2723 LogFlowFunc(("pVM=%p\n", pVM));
2724
2725#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2726 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2727 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2728#endif
2729 hmR0VmxStructsFree(pVM);
2730 return VINF_SUCCESS;
2731}
2732
2733
2734/**
2735 * Sets up the VM for execution under VT-x.
2736 * This function is only called once per-VM during initialization.
2737 *
2738 * @returns VBox status code.
2739 * @param pVM The cross context VM structure.
2740 */
2741VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2742{
2743 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2744 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2745
2746 LogFlowFunc(("pVM=%p\n", pVM));
2747
2748 /*
2749 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be
2750 * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without
2751 * pRealModeTSS, see hmR3InitFinalizeR0Intel().
2752 */
2753 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2754 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2755 || !pVM->hm.s.vmx.pRealModeTSS))
2756 {
2757 LogRelFunc(("Invalid real-on-v86 state.\n"));
2758 return VERR_INTERNAL_ERROR;
2759 }
2760
2761 /* Initialize these always, see hmR3InitFinalizeR0().*/
2762 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NONE;
2763 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
2764
2765 /* Setup the tagged-TLB flush handlers. */
2766 int rc = hmR0VmxSetupTaggedTlb(pVM);
2767 if (RT_FAILURE(rc))
2768 {
2769 LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2770 return rc;
2771 }
2772
2773 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2774 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2775#if HC_ARCH_BITS == 64
2776 if ( (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2777 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2778 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2779 {
2780 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2781 }
2782#endif
2783
2784 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
2785 RTCCUINTREG uHostCR4 = ASMGetCR4();
2786 if (RT_UNLIKELY(!(uHostCR4 & X86_CR4_VMXE)))
2787 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
2788
2789 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2790 {
2791 PVMCPU pVCpu = &pVM->aCpus[i];
2792 AssertPtr(pVCpu);
2793 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2794
2795 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2796 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2797
2798 /* Set revision dword at the beginning of the VMCS structure. */
2799 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2800
2801 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2802 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2803 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc\n", rc),
2804 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2805
2806 /* Load this VMCS as the current VMCS. */
2807 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2808 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc\n", rc),
2809 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2810
2811 rc = hmR0VmxSetupPinCtls(pVCpu);
2812 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc\n", rc),
2813 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2814
2815 rc = hmR0VmxSetupProcCtls(pVCpu);
2816 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc\n", rc),
2817 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2818
2819 rc = hmR0VmxSetupMiscCtls(pVCpu);
2820 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc\n", rc),
2821 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2822
2823 rc = hmR0VmxInitXcptBitmap(pVCpu);
2824 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc\n", rc),
2825 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2826
2827#if HC_ARCH_BITS == 32
2828 rc = hmR0VmxInitVmcsReadCache(pVCpu);
2829 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc\n", rc),
2830 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2831#endif
2832
2833 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2834 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2835 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc\n", rc),
2836 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2837
2838 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2839
2840 hmR0VmxUpdateErrorRecord(pVCpu, rc);
2841 }
2842
2843 return VINF_SUCCESS;
2844}
2845
2846
2847/**
2848 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2849 * the VMCS.
2850 *
2851 * @returns VBox status code.
2852 */
2853static int hmR0VmxExportHostControlRegs(void)
2854{
2855 RTCCUINTREG uReg = ASMGetCR0();
2856 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2857 AssertRCReturn(rc, rc);
2858
2859 uReg = ASMGetCR3();
2860 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2861 AssertRCReturn(rc, rc);
2862
2863 uReg = ASMGetCR4();
2864 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2865 AssertRCReturn(rc, rc);
2866 return rc;
2867}
2868
2869
2870/**
2871 * Saves the host segment registers and the GDTR, IDTR, TR, GS and FS bases into
2872 * the host-state area in the VMCS.
2873 *
2874 * @returns VBox status code.
2875 * @param pVCpu The cross context virtual CPU structure.
2876 */
2877static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu)
2878{
2879#if HC_ARCH_BITS == 64
2880/**
2881 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2882 * requirements. See hmR0VmxExportHostSegmentRegs().
2883 */
2884# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2885 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2886 { \
2887 bool fValidSelector = true; \
2888 if ((selValue) & X86_SEL_LDT) \
2889 { \
2890 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2891 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2892 } \
2893 if (fValidSelector) \
2894 { \
2895 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2896 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2897 } \
2898 (selValue) = 0; \
2899 }
2900
2901 /*
2902 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2903 * should -not- save the messed up state without restoring the original host-state,
2904 * see @bugref{7240}.
2905 *
2906     * This apparently can happen (most likely the FPU changes); deal with it rather than
2907     * asserting. This was observed when booting a Solaris 10u10 32-bit guest.
2908 */
2909 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
2910 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
2911 {
2912 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags,
2913 pVCpu->idCpu));
2914 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
2915 }
2916 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2917#else
2918 RT_NOREF(pVCpu);
2919#endif
2920
2921 /*
2922 * Host DS, ES, FS and GS segment registers.
2923 */
2924#if HC_ARCH_BITS == 64
2925 RTSEL uSelDS = ASMGetDS();
2926 RTSEL uSelES = ASMGetES();
2927 RTSEL uSelFS = ASMGetFS();
2928 RTSEL uSelGS = ASMGetGS();
2929#else
2930 RTSEL uSelDS = 0;
2931 RTSEL uSelES = 0;
2932 RTSEL uSelFS = 0;
2933 RTSEL uSelGS = 0;
2934#endif
2935
2936 /*
2937 * Host CS and SS segment registers.
2938 */
2939 RTSEL uSelCS = ASMGetCS();
2940 RTSEL uSelSS = ASMGetSS();
2941
2942 /*
2943 * Host TR segment register.
2944 */
2945 RTSEL uSelTR = ASMGetTR();
2946
2947#if HC_ARCH_BITS == 64
2948 /*
2949 * Determine if the host segment registers are suitable for VT-x. Otherwise load zero into
2950 * them to satisfy the VM-entry checks and restore the originals before we get preempted.
2951 *
2952 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2953 */
2954 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2955 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2956 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2957 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2958# undef VMXLOCAL_ADJUST_HOST_SEG
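 /* Illustrative note (not from the original sources): on typical Linux hosts DS, ES and GS
    are already 0 in ring-0, so the macro above leaves nothing to restore; other hosts (e.g.
    Windows) may keep ring-3 selectors (RPL=3) in these registers, in which case the original
    values are stashed in RestoreHost and 0 is loaded to satisfy the VM-entry checks in
    Intel spec. 26.2.3. */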
2959#endif
2960
2961 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2962 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2963 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2964 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2965 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2966 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2967 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2968 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2969 Assert(uSelCS);
2970 Assert(uSelTR);
2971
2972 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2973#if 0
2974 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2975 Assert(uSelSS != 0);
2976#endif
2977
2978 /* Write these host selector fields into the host-state area in the VMCS. */
2979 int rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
2980 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
2981#if HC_ARCH_BITS == 64
2982 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
2983 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
2984 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
2985 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
2986#else
2987 NOREF(uSelDS);
2988 NOREF(uSelES);
2989 NOREF(uSelFS);
2990 NOREF(uSelGS);
2991#endif
2992 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
2993 AssertRCReturn(rc, rc);
2994
2995 /*
2996 * Host GDTR and IDTR.
2997 */
2998 RTGDTR Gdtr;
2999 RTIDTR Idtr;
3000 RT_ZERO(Gdtr);
3001 RT_ZERO(Idtr);
3002 ASMGetGDTR(&Gdtr);
3003 ASMGetIDTR(&Idtr);
3004 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
3005 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
3006 AssertRCReturn(rc, rc);
3007
3008#if HC_ARCH_BITS == 64
3009 /*
3010 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps
3011 * them to the maximum limit (0xffff) on every VM-exit.
3012 */
3013 if (Gdtr.cbGdt != 0xffff)
3014 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3015
3016 /*
3017 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
3018 * Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit
3019 * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
3020 * However, several hosts either insist on 0xfff being the limit (Windows Patch Guard) or
3021 * use the limit for other purposes (darwin puts the CPU ID in there but botches sidt
3022 * alignment in at least one consumer). So, we're only allowing the IDTR.LIMIT to be left
3023 * at 0xffff on hosts where we are sure it won't cause trouble.
3024 */
3025# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3026 if (Idtr.cbIdt < 0x0fff)
3027# else
3028 if (Idtr.cbIdt != 0xffff)
3029# endif
3030 {
3031 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3032 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3033 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3034 }
3035#endif
3036
3037 /*
3038 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI
3039 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and
3040 * RPL should be too in most cases.
3041 */
3042 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
3043 ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE);
3044
3045 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
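 /* Illustration: a TR selector of e.g. 0x40 (index 8, TI=0, RPL=0) refers to the descriptor
    at byte offset 0x40 into the GDT; masking with X86_SEL_MASK strips the TI and RPL bits
    and yields exactly that byte offset, i.e. index * 8. */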
3046#if HC_ARCH_BITS == 64
3047 uintptr_t uTRBase = X86DESC64_BASE(pDesc);
3048
3049 /*
3050 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on
3051 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual
3052 * restoration if the host has something else. Task switching is not supported in 64-bit
3053 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the
3054 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3055 *
3056 * [1] See Intel spec. 3.5 "System Descriptor Types".
3057 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3058 */
3059 PVM pVM = pVCpu->CTX_SUFF(pVM);
3060 Assert(pDesc->System.u4Type == 11);
3061 if ( pDesc->System.u16LimitLow != 0x67
3062 || pDesc->System.u4LimitHigh)
3063 {
3064 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3065 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
3066 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
3067 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3068 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3069 }
3070
3071 /*
3072 * Store the GDTR as we need it when restoring the GDT and while restoring the TR.
3073 */
3074 if (pVCpu->hm.s.vmx.fRestoreHostFlags & (VMX_RESTORE_HOST_GDTR | VMX_RESTORE_HOST_SEL_TR))
3075 {
3076 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3077 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3078 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_NEED_WRITABLE)
3079 {
3080 /* The GDT is read-only but the writable GDT is available. */
3081 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE;
3082 pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = Gdtr.cbGdt;
3083 rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr);
3084 AssertRCReturn(rc, rc);
3085 }
3086 }
3087#else
3088 uintptr_t uTRBase = X86DESC_BASE(pDesc);
3089#endif
3090 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3091 AssertRCReturn(rc, rc);
3092
3093 /*
3094 * Host FS base and GS base.
3095 */
3096#if HC_ARCH_BITS == 64
3097 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3098 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3099 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
3100 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
3101 AssertRCReturn(rc, rc);
3102
3103 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3104 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3105 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3106 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3107 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3108#endif
3109 return VINF_SUCCESS;
3110}
3111
3112
3113/**
3114 * Exports certain host MSRs in the VM-exit MSR-load area and some in the
3115 * host-state area of the VMCS.
3116 *
3117 * These MSRs will be automatically restored on the host after every successful
3118 * VM-exit.
3119 *
3120 * @returns VBox status code.
3121 * @param pVCpu The cross context virtual CPU structure.
3122 *
3123 * @remarks No-long-jump zone!!!
3124 */
3125static int hmR0VmxExportHostMsrs(PVMCPU pVCpu)
3126{
3127 AssertPtr(pVCpu);
3128 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3129
3130 /*
3131 * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
3132 * rather than swapping them on every VM-entry.
3133 */
3134 hmR0VmxLazySaveHostMsrs(pVCpu);
3135
3136 /*
3137 * Host Sysenter MSRs.
3138 */
3139 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3140#if HC_ARCH_BITS == 32
3141 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3142 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3143#else
3144 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3145 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3146#endif
3147 AssertRCReturn(rc, rc);
3148
3149 /*
3150 * Host EFER MSR.
3151 *
3152 * If the CPU supports the newer VMCS controls for managing EFER, use them. Otherwise it's
3153 * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs().
3154 */
3155 PVM pVM = pVCpu->CTX_SUFF(pVM);
3156 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3157 {
3158 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3159 AssertRCReturn(rc, rc);
3160 }
3161
3162 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */
3163
3164 return VINF_SUCCESS;
3165}
3166
3167
3168/**
3169 * Figures out if we need to swap the EFER MSR which is particularly expensive.
3170 *
3171 * We check all relevant bits. For now, that's everything besides LMA/LME, as
3172 * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and
3173 * hmR0VmxExportGuestEntryCtls().
3174 *
3175 * @returns true if we need to load guest EFER, false otherwise.
3176 * @param pVCpu The cross context virtual CPU structure.
3177 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3178 * out-of-sync. Make sure to update the required fields
3179 * before using them.
3180 *
3181 * @remarks Requires EFER, CR4.
3182 * @remarks No-long-jump zone!!!
3183 */
3184static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3185{
3186#ifdef HMVMX_ALWAYS_SWAP_EFER
3187 return true;
3188#endif
3189
3190#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
3191 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3192 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3193 return false;
3194#endif
3195
3196 PVM pVM = pVCpu->CTX_SUFF(pVM);
3197 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3198 uint64_t const u64GuestEfer = pMixedCtx->msrEFER;
3199
3200 /*
3201 * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the
3202 * guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
3203 */
3204 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
3205 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3206 {
3207 return true;
3208 }
3209
3210 /*
3211 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
3212 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3213 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3214 */
3215 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3216 && (pMixedCtx->cr0 & X86_CR0_PG)
3217 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3218 {
3219 /* Assert that host is PAE capable. */
3220 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3221 return true;
3222 }
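 /* Illustrative example: a 32-bit PAE guest running with NX disabled (guest EFER.NXE=0) on a
    host with EFER.NXE=1 would take the return-true path above, since the NXE mismatch changes
    how the XD bit in the guest page tables is interpreted and thus EFER must be swapped. */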
3223
3224 return false;
3225}
3226
3227
3228/**
3229 * Exports the guest state with appropriate VM-entry controls in the VMCS.
3230 *
3231 * These controls can affect things done on VM-exit; e.g. "load debug controls",
3232 * see Intel spec. 24.8.1 "VM-entry controls".
3233 *
3234 * @returns VBox status code.
3235 * @param pVCpu The cross context virtual CPU structure.
3236 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3237 * out-of-sync. Make sure to update the required fields
3238 * before using them.
3239 *
3240 * @remarks Requires EFER.
3241 * @remarks No-long-jump zone!!!
3242 */
3243static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3244{
3245 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
3246 {
3247 PVM pVM = pVCpu->CTX_SUFF(pVM);
3248 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3249 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
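 /* Descriptive note: disallowed0 holds the entry-control bits the CPU requires to be 1 and
    allowed1 holds the bits that may be 1. Any bit we try to set in fVal that is not present
    in fZap is an unsupported combination and is rejected further below. */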
3250
3251 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3252 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3253
3254 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3255 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3256 {
3257 fVal |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3258 Log4Func(("VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
3259 }
3260 else
3261 Assert(!(fVal & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3262
3263 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
3264 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3265 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3266 {
3267 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3268 Log4Func(("VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n"));
3269 }
3270
3271 /*
3272 * The following should -not- be set (since we're not in SMM mode):
3273 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3274 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3275 */
3276
3277 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3278 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3279
3280 if ((fVal & fZap) != fVal)
3281 {
3282 Log4Func(("Invalid VM-entry controls combo! Cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
3283 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, fVal, fZap));
3284 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3285 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3286 }
3287
3288 /* Commit it to the VMCS and update our cache. */
3289 if (pVCpu->hm.s.vmx.u32EntryCtls != fVal)
3290 {
3291 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
3292 AssertRCReturn(rc, rc);
3293 pVCpu->hm.s.vmx.u32EntryCtls = fVal;
3294 }
3295
3296 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS);
3297 }
3298 return VINF_SUCCESS;
3299}
3300
3301
3302/**
3303 * Exports the guest state with appropriate VM-exit controls in the VMCS.
3304 *
3305 * @returns VBox status code.
3306 * @param pVCpu The cross context virtual CPU structure.
3307 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3308 * out-of-sync. Make sure to update the required fields
3309 * before using them.
3310 *
3311 * @remarks Requires EFER.
3312 */
3313static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3314{
3315 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
3316 {
3317 PVM pVM = pVCpu->CTX_SUFF(pVM);
3318 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3319 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3320
3321 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3322 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3323
3324 /*
3325 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3326 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in
3327 * hmR0VmxExportHostMsrs().
3328 */
3329#if HC_ARCH_BITS == 64
3330 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3331 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
3332#else
3333 Assert( pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64
3334 || pVCpu->hm.s.vmx.pfnStartVM == VMXR0StartVM32);
3335 /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */
3336 if (pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64)
3337 {
3338 /* The switcher returns to long mode, EFER is managed by the switcher. */
3339 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3340 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
3341 }
3342 else
3343 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3344#endif
3345
3346 /* If the newer VMCS fields for managing EFER exist, use them. */
3347 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3348 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3349 {
3350 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3351 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3352 Log4Func(("VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR and VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n"));
3353 }
3354
3355 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3356 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3357
3358 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3359 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3360 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3361
3362 /* Enable saving of the VMX preemption timer value on VM-exit. */
3363 if ( pVM->hm.s.vmx.fUsePreemptTimer
3364 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
3365 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3366
3367 if ((fVal & fZap) != fVal)
3368 {
3369 LogRelFunc(("Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
3370 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
3371 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3372 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3373 }
3374
3375 /* Commit it to the VMCS and update our cache. */
3376 if (pVCpu->hm.s.vmx.u32ExitCtls != fVal)
3377 {
3378 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
3379 AssertRCReturn(rc, rc);
3380 pVCpu->hm.s.vmx.u32ExitCtls = fVal;
3381 }
3382
3383 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS);
3384 }
3385 return VINF_SUCCESS;
3386}
3387
3388
3389/**
3390 * Sets the TPR threshold in the VMCS.
3391 *
3392 * @returns VBox status code.
3393 * @param pVCpu The cross context virtual CPU structure.
3394 * @param u32TprThreshold The TPR threshold (task-priority class only).
3395 */
3396DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, uint32_t u32TprThreshold)
3397{
3398 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3399 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW); RT_NOREF_PV(pVCpu);
3400 return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3401}
3402
3403
3404/**
3405 * Exports the guest APIC TPR state into the VMCS.
3406 *
3407 * @returns VBox status code.
3408 * @param pVCpu The cross context virtual CPU structure.
3409 *
3410 * @remarks No-long-jump zone!!!
3411 */
3412static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu)
3413{
3414 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
3415 {
3416 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
3417
3418 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
3419 && APICIsEnabled(pVCpu))
3420 {
3421 /*
3422 * Setup TPR shadowing.
3423 */
3424 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3425 {
3426 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3427
3428 bool fPendingIntr = false;
3429 uint8_t u8Tpr = 0;
3430 uint8_t u8PendingIntr = 0;
3431 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3432 AssertRCReturn(rc, rc);
3433
3434 /*
3435 * If there are interrupts pending but masked by the TPR, instruct VT-x to
3436 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
3437 * priority of the pending interrupt so we can deliver the interrupt. If there
3438 * are no interrupts pending, set threshold to 0 to not cause any
3439 * TPR-below-threshold VM-exits.
3440 */
3441 pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
3442 uint32_t u32TprThreshold = 0;
3443 if (fPendingIntr)
3444 {
3445 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3446 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
3447 const uint8_t u8TprPriority = u8Tpr >> 4;
3448 if (u8PendingPriority <= u8TprPriority)
3449 u32TprThreshold = u8PendingPriority;
3450 }
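 /* Worked example (illustrative): with guest TPR=0x50 (class 5) and the highest pending
    vector 0x61 (class 6), the interrupt is deliverable already and the threshold stays 0.
    With a pending vector of 0x41 (class 4) the threshold becomes 4, so the guest lowering
    its TPR below 0x40 triggers a TPR-below-threshold VM-exit and lets us inject it. */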
3451
3452 rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
3453 AssertRCReturn(rc, rc);
3454 }
3455 }
3456 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
3457 }
3458 return VINF_SUCCESS;
3459}
3460
3461
3462/**
3463 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3464 *
3465 * @returns Guest's interruptibility-state.
3466 * @param pVCpu The cross context virtual CPU structure.
3467 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3468 * out-of-sync. Make sure to update the required fields
3469 * before using them.
3470 *
3471 * @remarks No-long-jump zone!!!
3472 */
3473static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3474{
3475 /*
3476 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3477 */
3478 uint32_t fIntrState = 0;
3479 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3480 {
3481 /* If inhibition is active, RIP & RFLAGS should've been accessed
3482 (i.e. read previously from the VMCS or from ring-3). */
3483#ifdef VBOX_STRICT
3484 uint64_t const fExtrn = ASMAtomicUoReadU64(&pMixedCtx->fExtrn);
3485 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
3486#endif
3487 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3488 {
3489 if (pMixedCtx->eflags.Bits.u1IF)
3490 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3491 else
3492 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3493 }
3494 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3495 {
3496 /*
3497 * We can clear the inhibit force flag as even if we go back to the recompiler
3498 * without executing guest code in VT-x, the flag's condition to be cleared is
3499 * met and thus the cleared state is correct.
3500 */
3501 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3502 }
3503 }
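 /* Example (descriptive): after the guest executes STI with IF previously clear, interrupts
    remain inhibited for the following instruction; reporting block-by-STI (or block-by-MOV SS
    for stack-switching sequences) here keeps VT-x from injecting an external interrupt into
    that shadow. */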
3504
3505 /*
3506 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3507 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3508 * setting this would block host-NMIs and IRET will not clear the blocking.
3509 *
3510 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3511 */
3512 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3513 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3514 {
3515 fIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3516 }
3517
3518 return fIntrState;
3519}
3520
3521
3522/**
3523 * Exports the guest's interruptibility-state into the guest-state area in the
3524 * VMCS.
3525 *
3526 * @returns VBox status code.
3527 * @param pVCpu The cross context virtual CPU structure.
3528 * @param fIntrState The interruptibility-state to set.
3529 */
3530static int hmR0VmxExportGuestIntrState(PVMCPU pVCpu, uint32_t fIntrState)
3531{
3532 NOREF(pVCpu);
3533 AssertMsg(!(fIntrState & 0xfffffff0), ("%#x\n", fIntrState)); /* Bits 31:4 MBZ. */
3534 Assert((fIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3535 return VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, fIntrState);
3536}
3537
3538
3539/**
3540 * Exports the exception intercepts required for guest execution in the VMCS.
3541 *
3542 * @returns VBox status code.
3543 * @param pVCpu The cross context virtual CPU structure.
3544 *
3545 * @remarks No-long-jump zone!!!
3546 */
3547static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu)
3548{
3549 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS)
3550 {
3551 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
3552
3553 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportSharedCR0(). */
3554 if (pVCpu->hm.s.fGIMTrapXcptUD)
3555 uXcptBitmap |= RT_BIT(X86_XCPT_UD);
3556#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3557 else
3558 uXcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3559#endif
3560
3561 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_AC));
3562 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_DB));
3563
3564 if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap)
3565 {
3566 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3567 AssertRCReturn(rc, rc);
3568 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
3569 }
3570
3571 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
3572 Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64\n", uXcptBitmap));
3573 }
3574 return VINF_SUCCESS;
3575}
3576
3577
3578/**
3579 * Exports the guest's RIP into the guest-state area in the VMCS.
3580 *
3581 * @returns VBox status code.
3582 * @param pVCpu The cross context virtual CPU structure.
3583 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3584 * out-of-sync. Make sure to update the required fields
3585 * before using them.
3586 *
3587 * @remarks No-long-jump zone!!!
3588 */
3589static int hmR0VmxExportGuestRip(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3590{
3591 int rc = VINF_SUCCESS;
3592 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
3593 {
3594 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
3595
3596 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3597 AssertRCReturn(rc, rc);
3598
3599 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
3600 Log4Func(("RIP=%#RX64\n", pMixedCtx->rip));
3601 }
3602 return rc;
3603}
3604
3605
3606/**
3607 * Exports the guest's RSP into the guest-state area in the VMCS.
3608 *
3609 * @returns VBox status code.
3610 * @param pVCpu The cross context virtual CPU structure.
3611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3612 * out-of-sync. Make sure to update the required fields
3613 * before using them.
3614 *
3615 * @remarks No-long-jump zone!!!
3616 */
3617static int hmR0VmxExportGuestRsp(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3618{
3619 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
3620 {
3621 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
3622
3623 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3624 AssertRCReturn(rc, rc);
3625
3626 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
3627 }
3628 return VINF_SUCCESS;
3629}
3630
3631
3632/**
3633 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
3634 *
3635 * @returns VBox status code.
3636 * @param pVCpu The cross context virtual CPU structure.
3637 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3638 * out-of-sync. Make sure to update the required fields
3639 * before using them.
3640 *
3641 * @remarks No-long-jump zone!!!
3642 */
3643static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3644{
3645 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
3646 {
3647 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
3648
3649 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3650 Let us assert it as such and use 32-bit VMWRITE. */
3651 Assert(!RT_HI_U32(pMixedCtx->rflags.u64));
3652 X86EFLAGS fEFlags = pMixedCtx->eflags;
3653 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
3654 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3655
3656 /*
3657 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
3658 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
3659 * can run the real-mode guest code under Virtual 8086 mode.
3660 */
3661 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3662 {
3663 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3664 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3665 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
3666 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3667 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3668 }
3669
3670 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
3671 AssertRCReturn(rc, rc);
3672
3673 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
3674 Log4Func(("EFlags=%#RX32\n", fEFlags.u32));
3675 }
3676 return VINF_SUCCESS;
3677}
3678
3679
3680/**
3681 * Exports the guest CR0 control register into the guest-state area in the VMCS.
3682 *
3683 * The guest FPU state is always pre-loaded hence we don't need to bother about
3684 * sharing FPU related CR0 bits between the guest and host.
3685 *
3686 * @returns VBox status code.
3687 * @param pVCpu The cross context virtual CPU structure.
3688 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3689 * out-of-sync. Make sure to update the required fields
3690 * before using them.
3691 *
3692 * @remarks No-long-jump zone!!!
3693 */
3694static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3695{
3696 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
3697 {
3698 PVM pVM = pVCpu->CTX_SUFF(pVM);
3699 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3700 Assert(!RT_HI_U32(pMixedCtx->cr0));
3701
3702 uint32_t const u32ShadowCr0 = pMixedCtx->cr0;
3703 uint32_t u32GuestCr0 = pMixedCtx->cr0;
3704
3705 /*
3706 * Setup VT-x's view of the guest CR0.
3707 * Minimize VM-exits due to CR3 changes when we have NestedPaging.
3708 */
3709 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
3710 if (pVM->hm.s.fNestedPaging)
3711 {
3712 if (CPUMIsGuestPagingEnabled(pVCpu))
3713 {
3714 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3715 uProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3716 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3717 }
3718 else
3719 {
3720 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3721 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3722 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3723 }
3724
3725 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3726 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3727 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3728 }
3729 else
3730 {
3731 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3732 u32GuestCr0 |= X86_CR0_WP;
3733 }
3734
3735 /*
3736 * Guest FPU bits.
3737 *
3738 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
3739 * using CR0.TS.
3740 *
3741 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
3742 * set on the first CPUs to support VT-x, with no mention of any relaxation for unrestricted execution (UX) in the VM-entry checks.
3743 */
3744 u32GuestCr0 |= X86_CR0_NE;
3745
3746 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
3747 bool const fInterceptMF = !(u32ShadowCr0 & X86_CR0_NE);
3748
3749 /*
3750 * Update exception intercepts.
3751 */
3752 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
3753 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3754 {
3755 Assert(PDMVmmDevHeapIsEnabled(pVM));
3756 Assert(pVM->hm.s.vmx.pRealModeTSS);
3757 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3758 }
3759 else
3760 {
3761 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
3762 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3763 if (fInterceptMF)
3764 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
3765 }
3766
3767 /* Additional intercepts for debugging, define these yourself explicitly. */
3768#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3769 uXcptBitmap |= 0
3770 | RT_BIT(X86_XCPT_BP)
3771 | RT_BIT(X86_XCPT_DE)
3772 | RT_BIT(X86_XCPT_NM)
3773 | RT_BIT(X86_XCPT_TS)
3774 | RT_BIT(X86_XCPT_UD)
3775 | RT_BIT(X86_XCPT_NP)
3776 | RT_BIT(X86_XCPT_SS)
3777 | RT_BIT(X86_XCPT_GP)
3778 | RT_BIT(X86_XCPT_PF)
3779 | RT_BIT(X86_XCPT_MF)
3780 ;
3781#elif defined(HMVMX_ALWAYS_TRAP_PF)
3782 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
3783#endif
3784 Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
3785
3786 /*
3787 * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW).
3788 */
3789 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3790 uint32_t fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3791 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3792 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
3793 else
3794 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3795
3796 u32GuestCr0 |= fSetCr0;
3797 u32GuestCr0 &= fZapCr0;
3798 u32GuestCr0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
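 /* Worked example (typical, not guaranteed, values): with Cr0Fixed0=0x80000021 and
    Cr0Fixed1=0xffffffff, fSetCr0=0x80000021 forces PE, NE and PG to 1 (unless relaxed for
    unrestricted guests above) and fZapCr0=0xffffffff forces no bit to 0. */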
3799
3800 /*
3801 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3802 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3803 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3804 */
3805 uint32_t u32Cr0Mask = X86_CR0_PE
3806 | X86_CR0_NE
3807 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
3808 | X86_CR0_PG
3809 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3810 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3811 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3812
3813 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3814 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3815 * and @bugref{6944}. */
3816#if 0
3817 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3818 u32Cr0Mask &= ~X86_CR0_PE;
3819#endif
3820 /*
3821 * Finally, update VMCS fields with the CR0 values and the exception bitmap.
3822 */
3823 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0);
3824 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32ShadowCr0);
3825 if (u32Cr0Mask != pVCpu->hm.s.vmx.u32Cr0Mask)
3826 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32Cr0Mask);
3827 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
3828 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
3829 if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap)
3830 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3831 AssertRCReturn(rc, rc);
3832
3833 /* Update our caches. */
3834 pVCpu->hm.s.vmx.u32Cr0Mask = u32Cr0Mask;
3835 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
3836 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
3837
3838 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
3839
3840 Log4Func(("u32Cr0Mask=%#RX32 u32ShadowCr0=%#RX32 u32GuestCr0=%#RX32 (fSetCr0=%#RX32 fZapCr0=%#RX32\n", u32Cr0Mask,
3841 u32ShadowCr0, u32GuestCr0, fSetCr0, fZapCr0));
3842 }
3843
3844 return VINF_SUCCESS;
3845}
3846
3847
3848/**
3849 * Exports the guest control registers (CR3, CR4) into the guest-state area
3850 * in the VMCS.
3851 *
3852 * @returns VBox strict status code.
3853 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
3854 * without unrestricted guest access and the VMMDev is not presently
3855 * mapped (e.g. EFI32).
3856 *
3857 * @param pVCpu The cross context virtual CPU structure.
3858 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3859 * out-of-sync. Make sure to update the required fields
3860 * before using them.
3861 *
3862 * @remarks No-long-jump zone!!!
3863 */
3864static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3865{
3866 int rc = VINF_SUCCESS;
3867 PVM pVM = pVCpu->CTX_SUFF(pVM);
3868
3869 /*
3870 * Guest CR2.
3871 * It's always loaded in the assembler code. Nothing to do here.
3872 */
3873
3874 /*
3875 * Guest CR3.
3876 */
3877 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
3878 {
3879 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3880
3881 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3882 if (pVM->hm.s.fNestedPaging)
3883 {
3884 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3885
3886 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3887 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3888 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3889 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3890
3891 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3892 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3893 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
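 /* Illustration: write-back memory type is encoded as 6 in bits 2:0 and the page-walk length
    field holds "length - 1" (= 3) in bits 5:3, so the low bits added to the page-aligned
    PML4 address come to 0x06 | (3 << 3) = 0x1e. */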
3894
3895 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3896 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3897 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
3898 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3899 AssertMsg( !((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
3900 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
3901 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3902
3903 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3904 AssertRCReturn(rc, rc);
3905
3906 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3907 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3908 {
3909 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3910 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3911 {
3912 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
3913 AssertRCReturn(rc, rc);
3914 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
3915 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
3916 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
3917 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
3918 AssertRCReturn(rc, rc);
3919 }
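 /* Descriptive note: with EPT and PAE paging the CPU takes the four PDPTEs from these VMCS
    fields on VM-entry instead of re-reading them from guest CR3, which is why they must be
    kept in sync here. */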
3920
3921 /*
3922 * With Nested Paging the guest's view of its CR3 is left untouched when the
3923 * guest is using paging, or when we have unrestricted guest execution to handle
3924 * the guest while it's not using paging.
3925 */
3926 GCPhysGuestCR3 = pMixedCtx->cr3;
3927 }
3928 else
3929 {
3930 /*
3931 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
3932 * thinks it accesses physical memory directly, we use our identity-mapped
3933 * page table to map guest-linear to guest-physical addresses. EPT takes care
3934 * of translating it to host-physical addresses.
3935 */
3936 RTGCPHYS GCPhys;
3937 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3938
3939 /* We obtain it here every time as the guest could have relocated this PCI region. */
3940 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3941 if (RT_SUCCESS(rc))
3942 { /* likely */ }
3943 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
3944 {
3945 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
3946 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
3947 }
3948 else
3949 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
3950
3951 GCPhysGuestCR3 = GCPhys;
3952 }
3953
3954 Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCR3));
3955 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3956 AssertRCReturn(rc, rc);
3957 }
3958 else
3959 {
3960 /* Non-nested paging case, just use the hypervisor's CR3. */
3961 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3962
3963 Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
3964 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3965 AssertRCReturn(rc, rc);
3966 }
3967
3968 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
3969 }
3970
3971 /*
3972 * Guest CR4.
3973 * ASSUMES this is done every time we get in from ring-3! (XCR0)
3974 */
3975 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
3976 {
3977 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3978 Assert(!RT_HI_U32(pMixedCtx->cr4));
3979
3980 uint32_t u32GuestCr4 = pMixedCtx->cr4;
3981 uint32_t const u32ShadowCr4 = pMixedCtx->cr4;
3982
3983 /*
3984 * Setup VT-x's view of the guest CR4.
3985 *
3986 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
3987 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
3988 * redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
3989 *
3990 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3991 */
3992 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3993 {
3994 Assert(pVM->hm.s.vmx.pRealModeTSS);
3995 Assert(PDMVmmDevHeapIsEnabled(pVM));
3996 u32GuestCr4 &= ~X86_CR4_VME;
3997 }
3998
3999 if (pVM->hm.s.fNestedPaging)
4000 {
4001 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
4002 && !pVM->hm.s.vmx.fUnrestrictedGuest)
4003 {
4004 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
4005 u32GuestCr4 |= X86_CR4_PSE;
4006 /* Our identity mapping is a 32-bit page directory. */
4007 u32GuestCr4 &= ~X86_CR4_PAE;
4008 }
4009 /* else use guest CR4.*/
4010 }
4011 else
4012 {
4013 /*
4014 * The shadow paging mode and the guest paging mode can differ; the shadow follows the host
4015 * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
4016 */
4017 switch (pVCpu->hm.s.enmShadowMode)
4018 {
4019 case PGMMODE_REAL: /* Real-mode. */
4020 case PGMMODE_PROTECTED: /* Protected mode without paging. */
4021 case PGMMODE_32_BIT: /* 32-bit paging. */
4022 {
4023 u32GuestCr4 &= ~X86_CR4_PAE;
4024 break;
4025 }
4026
4027 case PGMMODE_PAE: /* PAE paging. */
4028 case PGMMODE_PAE_NX: /* PAE paging with NX. */
4029 {
4030 u32GuestCr4 |= X86_CR4_PAE;
4031 break;
4032 }
4033
4034 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4035 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4036#ifdef VBOX_ENABLE_64_BITS_GUESTS
4037 break;
4038#endif
4039 default:
4040 AssertFailed();
4041 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4042 }
4043 }
4044
4045 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4046 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4047 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4048 u32GuestCr4 |= fSetCr4;
4049 u32GuestCr4 &= fZapCr4;
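 /* Worked example (typical, not guaranteed, values): with Cr4Fixed0=0x2000 (only VMXE
    required to be 1), fSetCr4=0x2000 keeps CR4.VMXE set while the guest runs, and fZapCr4
    (Fixed0 | Fixed1) clears any CR4 bits the CPU does not support. */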
4050
4051 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them,
4052 that would cause a VM-exit. */
4053 uint32_t u32Cr4Mask = X86_CR4_VME
4054 | X86_CR4_PAE
4055 | X86_CR4_PGE
4056 | X86_CR4_PSE
4057 | X86_CR4_VMXE;
4058 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
4059 u32Cr4Mask |= X86_CR4_OSXSAVE;
4060 if (pVM->cpum.ro.GuestFeatures.fPcid)
4061 u32Cr4Mask |= X86_CR4_PCIDE;
4062
4063 /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow
4064 into the VMCS and update our cache. */
4065 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCr4);
4066 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32ShadowCr4);
4067 if (pVCpu->hm.s.vmx.u32Cr4Mask != u32Cr4Mask)
4068 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32Cr4Mask);
4069 AssertRCReturn(rc, rc);
4070 pVCpu->hm.s.vmx.u32Cr4Mask = u32Cr4Mask;
4071
4072 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
4073 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
4074
4075 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
4076
4077 Log4Func(("u32GuestCr4=%#RX32 u32ShadowCr4=%#RX32 (fSetCr4=%#RX32 fZapCr4=%#RX32)\n", u32GuestCr4, u32ShadowCr4, fSetCr4,
4078 fZapCr4));
4079 }
4080 return rc;
4081}
4082
4083
4084/**
4085 * Exports the guest debug registers into the guest-state area in the VMCS.
4086 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4087 *
4088 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
4089 *
4090 * @returns VBox status code.
4091 * @param pVCpu The cross context virtual CPU structure.
4092 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4093 * out-of-sync. Make sure to update the required fields
4094 * before using them.
4095 *
4096 * @remarks No-long-jump zone!!!
4097 */
4098static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4099{
4100 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4101
4102#ifdef VBOX_STRICT
4103 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4104 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4105 {
4106 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4107 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4108 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4109 }
4110#endif
4111
4112 bool fSteppingDB = false;
4113 bool fInterceptMovDRx = false;
4114 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
4115 if (pVCpu->hm.s.fSingleInstruction)
4116 {
4117 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4118 PVM pVM = pVCpu->CTX_SUFF(pVM);
4119 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4120 {
4121 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4122 Assert(fSteppingDB == false);
4123 }
4124 else
4125 {
4126 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4127 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
4128 pVCpu->hm.s.fClearTrapFlag = true;
4129 fSteppingDB = true;
4130 }
4131 }
4132
4133 uint32_t u32GuestDr7;
4134 if ( fSteppingDB
4135 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4136 {
4137 /*
4138 * Use the combined guest and host DRx values found in the hypervisor register set
4139 * because the debugger has breakpoints active or someone is single stepping on the
4140 * host side without a monitor trap flag.
4141 *
4142 * Note! DBGF expects a clean DR6 state before executing guest code.
4143 */
4144#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4145 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4146 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4147 {
4148 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4149 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4150 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4151 }
4152 else
4153#endif
4154 if (!CPUMIsHyperDebugStateActive(pVCpu))
4155 {
4156 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4157 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4158 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4159 }
4160
4161 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
4162 u32GuestDr7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
4163 pVCpu->hm.s.fUsingHyperDR7 = true;
4164 fInterceptMovDRx = true;
4165 }
4166 else
4167 {
4168 /*
4169 * If the guest has enabled debug registers, we need to load them prior to
4170 * executing guest code so they'll trigger at the right time.
4171 */
4172 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
4173 {
4174#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4175 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4176 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4177 {
4178 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4179 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4180 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4181 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4182 }
4183 else
4184#endif
4185 if (!CPUMIsGuestDebugStateActive(pVCpu))
4186 {
4187 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4188 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4189 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4190 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4191 }
4192 Assert(!fInterceptMovDRx);
4193 }
4194 /*
4195 * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4196 * must intercept #DB in order to maintain a correct guest DR6 value, and
4197 * because we need to intercept it to prevent nested #DBs from hanging the
4198 * CPU, we end up always having to intercept it. See hmR0VmxInitXcptBitmap.
4199 */
4200#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4201 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4202 && !CPUMIsGuestDebugStateActive(pVCpu))
4203#else
4204 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4205#endif
4206 {
4207 fInterceptMovDRx = true;
4208 }
4209
4210 /* Update DR7 with the actual guest value. */
4211 u32GuestDr7 = pMixedCtx->dr[7];
4212 pVCpu->hm.s.fUsingHyperDR7 = false;
4213 }
4214
4215 if (fInterceptMovDRx)
4216 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4217 else
4218 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4219
4220 /*
4221 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
4222 * monitor-trap flag and update our cache.
4223 */
4224 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
4225 {
4226 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
4227 AssertRCReturn(rc2, rc2);
4228 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
4229 }
4230
4231 /*
4232 * Update guest DR7.
4233 */
4234 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
4235 AssertRCReturn(rc, rc);
4236
4237 return VINF_SUCCESS;
4238}
4239
4240
4241#ifdef VBOX_STRICT
4242/**
4243 * Strict function to validate segment registers.
4244 *
4245 * @param pVCpu The cross context virtual CPU structure.
4246 * @param pCtx Pointer to the guest-CPU context.
4247 *
4248 * @remarks Will import guest CR0 on strict builds during validation of
4249 * segments.
4250 */
4251static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pCtx)
4252{
4253 /*
4254 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
4255 *
4256 * The reason we check for attribute value 0 in this function and not just the unusable bit is
4257 * because hmR0VmxExportGuestSegmentReg() only updates the VMCS' copy of the value with the unusable bit
4258 * and doesn't change the guest-context value.
4259 */
4260 PVM pVM = pVCpu->CTX_SUFF(pVM);
4261 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
4262 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4263 && ( !CPUMIsGuestInRealModeEx(pCtx)
4264 && !CPUMIsGuestInV86ModeEx(pCtx)))
4265 {
4266 /* Protected mode checks */
4267 /* CS */
4268 Assert(pCtx->cs.Attr.n.u1Present);
4269 Assert(!(pCtx->cs.Attr.u & 0xf00));
4270 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4271 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4272 || !(pCtx->cs.Attr.n.u1Granularity));
4273 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4274 || (pCtx->cs.Attr.n.u1Granularity));
4275 /* CS cannot be loaded with NULL in protected mode. */
4276 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
4277 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4278 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4279 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4280 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4281 else
4282 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4283 /* SS */
4284 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4285 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4286 if ( !(pCtx->cr0 & X86_CR0_PE)
4287 || pCtx->cs.Attr.n.u4Type == 3)
4288 {
4289 Assert(!pCtx->ss.Attr.n.u2Dpl);
4290 }
4291 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4292 {
4293 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4294 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4295 Assert(pCtx->ss.Attr.n.u1Present);
4296 Assert(!(pCtx->ss.Attr.u & 0xf00));
4297 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4298 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4299 || !(pCtx->ss.Attr.n.u1Granularity));
4300 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4301 || (pCtx->ss.Attr.n.u1Granularity));
4302 }
4303 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
4304 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4305 {
4306 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4307 Assert(pCtx->ds.Attr.n.u1Present);
4308 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4309 Assert(!(pCtx->ds.Attr.u & 0xf00));
4310 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4311 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4312 || !(pCtx->ds.Attr.n.u1Granularity));
4313 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4314 || (pCtx->ds.Attr.n.u1Granularity));
4315 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4316 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4317 }
4318 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4319 {
4320 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4321 Assert(pCtx->es.Attr.n.u1Present);
4322 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4323 Assert(!(pCtx->es.Attr.u & 0xf00));
4324 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4325 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4326 || !(pCtx->es.Attr.n.u1Granularity));
4327 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4328 || (pCtx->es.Attr.n.u1Granularity));
4329 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4330 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4331 }
4332 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4333 {
4334 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4335 Assert(pCtx->fs.Attr.n.u1Present);
4336 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4337 Assert(!(pCtx->fs.Attr.u & 0xf00));
4338 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4339 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4340 || !(pCtx->fs.Attr.n.u1Granularity));
4341 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4342 || (pCtx->fs.Attr.n.u1Granularity));
4343 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4344 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4345 }
4346 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4347 {
4348 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4349 Assert(pCtx->gs.Attr.n.u1Present);
4350 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4351 Assert(!(pCtx->gs.Attr.u & 0xf00));
4352 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4353 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4354 || !(pCtx->gs.Attr.n.u1Granularity));
4355 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4356 || (pCtx->gs.Attr.n.u1Granularity));
4357 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4358 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4359 }
4360 /* 64-bit capable CPUs. */
4361# if HC_ARCH_BITS == 64
4362 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4363 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
4364 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
4365 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
4366# endif
4367 }
4368 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4369 || ( CPUMIsGuestInRealModeEx(pCtx)
4370 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4371 {
4372 /* Real and v86 mode checks. */
4373 /* hmR0VmxExportGuestSegmentReg() writes the modified in VMCS. We want what we're feeding to VT-x. */
4374 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4375 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4376 {
4377 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4378 }
4379 else
4380 {
4381 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4382 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4383 }
4384
4385 /* CS */
4386 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4387 Assert(pCtx->cs.u32Limit == 0xffff);
4388 Assert(u32CSAttr == 0xf3);
4389 /* SS */
4390 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4391 Assert(pCtx->ss.u32Limit == 0xffff);
4392 Assert(u32SSAttr == 0xf3);
4393 /* DS */
4394 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4395 Assert(pCtx->ds.u32Limit == 0xffff);
4396 Assert(u32DSAttr == 0xf3);
4397 /* ES */
4398 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4399 Assert(pCtx->es.u32Limit == 0xffff);
4400 Assert(u32ESAttr == 0xf3);
4401 /* FS */
4402 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4403 Assert(pCtx->fs.u32Limit == 0xffff);
4404 Assert(u32FSAttr == 0xf3);
4405 /* GS */
4406 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4407 Assert(pCtx->gs.u32Limit == 0xffff);
4408 Assert(u32GSAttr == 0xf3);
4409 /* 64-bit capable CPUs. */
4410# if HC_ARCH_BITS == 64
4411 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4412 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
4413 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
4414 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
4415# endif
4416 }
4417}
4418#endif /* VBOX_STRICT */
4419
4420
4421/**
4422 * Exports a guest segment register into the guest-state area in the VMCS.
4423 *
4424 * @returns VBox status code.
4425 * @param pVCpu The cross context virtual CPU structure.
4426 * @param idxSel Index of the selector in the VMCS.
4427 * @param idxLimit Index of the segment limit in the VMCS.
4428 * @param idxBase Index of the segment base in the VMCS.
4429 * @param idxAccess Index of the access rights of the segment in the VMCS.
4430 * @param pSelReg Pointer to the segment selector.
4431 *
4432 * @remarks No-long-jump zone!!!
4433 */
4434static int hmR0VmxExportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
4435 PCCPUMSELREG pSelReg)
4436{
4437 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4438 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4439 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4440 AssertRCReturn(rc, rc);
4441
4442 uint32_t u32Access = pSelReg->Attr.u;
4443 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4444 {
4445        /* VT-x requires our real-on-v86 mode hack to override the segment access-right bits. */
4446 u32Access = 0xf3;
4447 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4448 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4449 }
4450 else
4451 {
4452 /*
4453         * The way to differentiate between a real null selector and a selector that was just
4454         * loaded with 0 in real-mode is by the segment attributes. A selector loaded in real-mode
4455         * with the value 0 is valid and usable in protected-mode and we should -not- mark it as an
4456         * unusable segment. Both the recompiler & VT-x ensure that NULL selectors loaded in
4457         * protected-mode have their attributes set to 0.
4458 */
4459 if (!u32Access)
4460 u32Access = X86DESCATTR_UNUSABLE;
4461 }
4462
4463 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4464 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4465              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
4466
4467 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4468 AssertRCReturn(rc, rc);
4469 return rc;
4470}
4471
4472
4473/**
4474 * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4475 * into the guest-state area in the VMCS.
4476 *
4477 * @returns VBox status code.
4478 * @param pVCpu The cross context virtual CPU structure.
4479 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4480 * out-of-sync. Make sure to update the required fields
4481 * before using them.
4482 *
4483 * @remarks Will import guest CR0 on strict builds during validation of
4484 * segments.
4485 * @remarks No-long-jump zone!!!
4486 */
4487static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
4488{
4489 int rc = VERR_INTERNAL_ERROR_5;
4490 PVM pVM = pVCpu->CTX_SUFF(pVM);
4491
4492 /*
4493 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4494 */
4495 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
4496 {
4497#ifdef VBOX_WITH_REM
4498 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4499 {
4500 Assert(pVM->hm.s.vmx.pRealModeTSS);
4501 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4502 if ( pVCpu->hm.s.vmx.fWasInRealMode
4503 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4504 {
4505 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4506 in real-mode (e.g. OpenBSD 4.0) */
4507 REMFlushTBs(pVM);
4508 Log4Func(("Switch to protected mode detected!\n"));
4509 pVCpu->hm.s.vmx.fWasInRealMode = false;
4510 }
4511 }
4512#endif
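        /* Each segment below follows the same pattern: if it is marked dirty, stash the original access rights
           while the real-on-v86 hack is active (they get overridden with 0xf3 in the VMCS), export the register
           and clear its dirty flag. */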
4513 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
4514 {
4515 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
4516 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4517 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4518 rc = HMVMX_EXPORT_SREG(CS, &pMixedCtx->cs);
4519 AssertRCReturn(rc, rc);
4520 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
4521 }
4522
4523 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
4524 {
4525 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
4526 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4527 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4528 rc = HMVMX_EXPORT_SREG(SS, &pMixedCtx->ss);
4529 AssertRCReturn(rc, rc);
4530 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
4531 }
4532
4533 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
4534 {
4535 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
4536 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4537 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4538 rc = HMVMX_EXPORT_SREG(DS, &pMixedCtx->ds);
4539 AssertRCReturn(rc, rc);
4540 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
4541 }
4542
4543 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
4544 {
4545 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
4546 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4547 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4548 rc = HMVMX_EXPORT_SREG(ES, &pMixedCtx->es);
4549 AssertRCReturn(rc, rc);
4550 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
4551 }
4552
4553 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
4554 {
4555 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
4556 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4557 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4558 rc = HMVMX_EXPORT_SREG(FS, &pMixedCtx->fs);
4559 AssertRCReturn(rc, rc);
4560 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
4561 }
4562
4563 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
4564 {
4565 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
4566 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4567 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4568 rc = HMVMX_EXPORT_SREG(GS, &pMixedCtx->gs);
4569 AssertRCReturn(rc, rc);
4570 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
4571 }
4572
4573#ifdef VBOX_STRICT
4574 hmR0VmxValidateSegmentRegs(pVCpu, pMixedCtx);
4575#endif
4576
4577 /* Update the exit history entry with the correct CS.BASE + RIP. */
4578 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
4579 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true);
4580
4581 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
4582 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4583 }
4584
4585 /*
4586 * Guest TR.
4587 */
4588 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
4589 {
4590 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
4591
4592 /*
4593 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
4594 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
4595 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4596 */
4597 uint16_t u16Sel = 0;
4598 uint32_t u32Limit = 0;
4599 uint64_t u64Base = 0;
4600 uint32_t u32AccessRights = 0;
4601
4602 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4603 {
4604 u16Sel = pMixedCtx->tr.Sel;
4605 u32Limit = pMixedCtx->tr.u32Limit;
4606 u64Base = pMixedCtx->tr.u64Base;
4607 u32AccessRights = pMixedCtx->tr.Attr.u;
4608 }
4609 else
4610 {
4611 Assert(pVM->hm.s.vmx.pRealModeTSS);
4612 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4613
4614 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4615 RTGCPHYS GCPhys;
4616 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4617 AssertRCReturn(rc, rc);
4618
4619 X86DESCATTR DescAttr;
4620 DescAttr.u = 0;
4621 DescAttr.n.u1Present = 1;
4622 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4623
4624 u16Sel = 0;
4625 u32Limit = HM_VTX_TSS_SIZE;
4626 u64Base = GCPhys; /* in real-mode phys = virt. */
4627 u32AccessRights = DescAttr.u;
4628 }
4629
4630 /* Validate. */
4631 Assert(!(u16Sel & RT_BIT(2)));
4632 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4633 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4634 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4635 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4636 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4637 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4638 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4639 Assert( (u32Limit & 0xfff) == 0xfff
4640 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4641 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4642 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4643
4644 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_TR_SEL, u16Sel);
4645 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
4646 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
4647 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
4648 AssertRCReturn(rc, rc);
4649
4650 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
4651 Log4Func(("TR base=%#RX64\n", pMixedCtx->tr.u64Base));
4652 }
4653
4654 /*
4655 * Guest GDTR.
4656 */
4657 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
4658 {
4659 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
4660
4661 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
4662 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
4663 AssertRCReturn(rc, rc);
4664
4665 /* Validate. */
4666 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4667
4668 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
4669 Log4Func(("GDTR base=%#RX64\n", pMixedCtx->gdtr.pGdt));
4670 }
4671
4672 /*
4673 * Guest LDTR.
4674 */
4675 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
4676 {
4677 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
4678
4679        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
4680 uint32_t u32Access = 0;
4681 if (!pMixedCtx->ldtr.Attr.u)
4682 u32Access = X86DESCATTR_UNUSABLE;
4683 else
4684 u32Access = pMixedCtx->ldtr.Attr.u;
4685
4686 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, pMixedCtx->ldtr.Sel);
4687 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
4688 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
4689 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
4690 AssertRCReturn(rc, rc);
4691
4692 /* Validate. */
4693 if (!(u32Access & X86DESCATTR_UNUSABLE))
4694 {
4695 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4696 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4697 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4698 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4699 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4700 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4701 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4702 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4703 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4704 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4705 }
4706
4707 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
4708 Log4Func(("LDTR base=%#RX64\n", pMixedCtx->ldtr.u64Base));
4709 }
4710
4711 /*
4712 * Guest IDTR.
4713 */
4714 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
4715 {
4716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
4717
4718 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
4719 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
4720 AssertRCReturn(rc, rc);
4721
4722 /* Validate. */
4723 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4724
4725 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
4726 Log4Func(("IDTR base=%#RX64\n", pMixedCtx->idtr.pIdt));
4727 }
4728
4729 return VINF_SUCCESS;
4730}
4731
4732
4733/**
4734 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4735 * areas.
4736 *
4737 * These MSRs will automatically be loaded to the host CPU on every successful
4738 * VM-entry and stored from the host CPU on every successful VM-exit. This also
4739 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
4740 * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs().
4741 *
4742 * Also exports the guest sysenter MSRs into the guest-state area in the VMCS.
4743 *
4744 * @returns VBox status code.
4745 * @param pVCpu The cross context virtual CPU structure.
4746 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4747 * out-of-sync. Make sure to update the required fields
4748 * before using them.
4749 *
4750 * @remarks No-long-jump zone!!!
4751 */
4752static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
4753{
4754 AssertPtr(pVCpu);
4755 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4756
4757 /*
4758     * MSRs for which we use the auto-load/store MSR area in the VMCS.
4759 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
4760 */
4761 PVM pVM = pVCpu->CTX_SUFF(pVM);
4762 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
4763 {
4764 if (pVM->hm.s.fAllow64BitGuests)
4765 {
4766#if HC_ARCH_BITS == 32
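            /* The lazy MSR swapping is only implemented for 64-bit hosts; on 32-bit hosts these MSRs have to
               go through the VMCS auto-load/store area. */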
4767 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
4768
4769 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL);
4770 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL);
4771 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false, NULL);
4772 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
4773 AssertRCReturn(rc, rc);
4774# ifdef LOG_ENABLED
4775 PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4776 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4777 Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
4778# endif
4779#endif
4780 }
4781 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4782 }
4783
4784 /*
4785 * Guest Sysenter MSRs.
4786 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4787 * VM-exits on WRMSRs for these MSRs.
4788 */
4789 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
4790 {
4791 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4792
4793 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
4794 {
4795 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
4796 AssertRCReturn(rc, rc);
4797 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4798 }
4799
4800 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
4801 {
4802 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
4803 AssertRCReturn(rc, rc);
4804 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4805 }
4806
4807 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
4808 {
4809 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
4810 AssertRCReturn(rc, rc);
4811 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4812 }
4813 }
4814
4815 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
4816 {
4817 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
4818
4819 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4820 {
4821 /*
4822 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4823 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4824 */
4825 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4826 {
4827 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4828                AssertRCReturn(rc, rc);
4829 Log4Func(("EFER=%#RX64\n", pMixedCtx->msrEFER));
4830 }
4831 else
4832 {
4833 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */,
4834 NULL /* pfAddedAndUpdated */);
4835 AssertRCReturn(rc, rc);
4836
4837 /* We need to intercept reads too, see @bugref{7386#c16}. */
4838 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
4839 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4840 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
4841 pVCpu->hm.s.vmx.cMsrs));
4842 }
4843 }
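        /* No EFER swapping needed. Without dedicated VMCS EFER fields, make sure any stale EFER entry is
           removed from the auto-load/store MSR area. */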
4844 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4845 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4846 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
4847 }
4848
4849 return VINF_SUCCESS;
4850}
4851
4852
4853#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
4854/**
4855 * Check if guest state allows safe use of 32-bit switcher again.
4856 *
4857 * Segment bases and protected mode structures must be 32-bit addressable
4858 * because the 32-bit switcher will ignore high dword when writing these VMCS
4859 * fields. See @bugref{8432} for details.
4860 *
4861 * @returns true if safe, false if must continue to use the 64-bit switcher.
4862 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4863 * out-of-sync. Make sure to update the required fields
4864 * before using them.
4865 *
4866 * @remarks No-long-jump zone!!!
4867 */
4868static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pMixedCtx)
4869{
4870 if (pMixedCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false;
4871 if (pMixedCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false;
4872 if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false;
4873 if (pMixedCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false;
4874 if (pMixedCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false;
4875 if (pMixedCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4876 if (pMixedCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false;
4877 if (pMixedCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false;
4878 if (pMixedCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4879 if (pMixedCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4880
4881 /* All good, bases are 32-bit. */
4882 return true;
4883}
4884#endif
4885
4886
4887/**
4888 * Selects the appropriate function to run guest code.
4889 *
4890 * @returns VBox status code.
4891 * @param pVCpu The cross context virtual CPU structure.
4892 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4893 * out-of-sync. Make sure to update the required fields
4894 * before using them.
4895 *
4896 * @remarks No-long-jump zone!!!
4897 */
4898static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
4899{
4900 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4901 {
4902#ifndef VBOX_ENABLE_64_BITS_GUESTS
4903 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4904#endif
4905 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4906#if HC_ARCH_BITS == 32
4907 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4908 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4909 {
4910#ifdef VBOX_STRICT
4911 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4912 {
4913                /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4914 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
4915 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4916 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS
4917 | HM_CHANGED_VMX_ENTRY_CTLS
4918 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
4919 }
4920#endif
4921 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4922
4923            /* Mark that we've switched to the 64-bit handler; we can't safely switch back to the 32-bit one
4924               for the rest of the VM run (until VM reset). See @bugref{8432#c7}. */
4925 pVCpu->hm.s.vmx.fSwitchedTo64on32 = true;
4926 Log4Func(("Selected 64-bit switcher\n"));
4927 }
4928#else
4929 /* 64-bit host. */
4930 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4931#endif
4932 }
4933 else
4934 {
4935 /* Guest is not in long mode, use the 32-bit handler. */
4936#if HC_ARCH_BITS == 32
4937 if ( pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32
4938 && !pVCpu->hm.s.vmx.fSwitchedTo64on32 /* If set, guest mode change does not imply switcher change. */
4939 && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4940 {
4941# ifdef VBOX_STRICT
4942            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4943 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
4944 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4945 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS
4946 | HM_CHANGED_VMX_ENTRY_CTLS
4947 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
4948# endif
4949 }
4950# ifdef VBOX_ENABLE_64_BITS_GUESTS
4951 /*
4952             * Keep using the 64-bit switcher even though the host is 32-bit, because of bad Intel
4953             * design; see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit
4954             * switcher flag because we now know the guest is in a sane state where it's safe
4955             * to use the 32-bit switcher. Otherwise, check whether the guest state allows us to
4956             * use the much faster 32-bit switcher again.
4957 */
4958 if (!pVCpu->hm.s.vmx.fSwitchedTo64on32)
4959 {
4960 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4961 Log4Func(("Selected 32-bit switcher\n"));
4962 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4963 }
4964 else
4965 {
4966 Assert(pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64);
4967 if ( pVCpu->hm.s.vmx.RealMode.fRealOnV86Active
4968 || hmR0VmxIs32BitSwitcherSafe(pMixedCtx))
4969 {
4970 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
4971 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4972 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR
4973 | HM_CHANGED_VMX_ENTRY_CTLS
4974 | HM_CHANGED_VMX_EXIT_CTLS
4975 | HM_CHANGED_HOST_CONTEXT);
4976 Log4Func(("Selected 32-bit switcher (safe)\n"));
4977 }
4978 }
4979# else
4980 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4981# endif
4982#else
4983 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4984#endif
4985 }
4986 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4987 return VINF_SUCCESS;
4988}
4989
4990
4991/**
4992 * Wrapper for running the guest code in VT-x.
4993 *
4994 * @returns VBox status code, no informational status codes.
4995 * @param pVCpu The cross context virtual CPU structure.
4996 * @param pCtx Pointer to the guest-CPU context.
4997 *
4998 * @remarks No-long-jump zone!!!
4999 */
5000DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx)
5001{
5002 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
5003 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
5004
5005 /*
5006 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
5007 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
5008 * callee-saved and thus the need for this XMM wrapper.
5009 *
5010 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
5011 */
5012 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
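    /* A VMCS that is already in the 'launched' state must be entered with VMRESUME; otherwise VMLAUNCH must be used. */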
5013 /** @todo Add stats for resume vs launch. */
5014 PVM pVM = pVCpu->CTX_SUFF(pVM);
5015#ifdef VBOX_WITH_KERNEL_USING_XMM
5016 int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
5017#else
5018 int rc = pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
5019#endif
5020 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
5021 return rc;
5022}
5023
5024
5025/**
5026 * Reports world-switch error and dumps some useful debug info.
5027 *
5028 * @param pVCpu The cross context virtual CPU structure.
5029 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
5030 * @param pCtx Pointer to the guest-CPU context.
5031 * @param pVmxTransient Pointer to the VMX transient structure (only
5032 * exitReason updated).
5033 */
5034static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
5035{
5036 Assert(pVCpu);
5037 Assert(pCtx);
5038 Assert(pVmxTransient);
5039 HMVMX_ASSERT_PREEMPT_SAFE();
5040
5041 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
5042 switch (rcVMRun)
5043 {
5044 case VERR_VMX_INVALID_VMXON_PTR:
5045 AssertFailed();
5046 break;
5047 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
5048 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
5049 {
5050 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
5051 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
5052 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
5053 AssertRC(rc);
5054
5055 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5056 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
5057 Cannot do it here as we may have been long preempted. */
5058
5059#ifdef VBOX_STRICT
5060 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
5061 pVmxTransient->uExitReason));
5062 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
5063 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
5064 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
5065 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
5066 else
5067 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
5068 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
5069 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
5070
5071 /* VMX control bits. */
5072 uint32_t u32Val;
5073 uint64_t u64Val;
5074 RTHCUINTREG uHCReg;
5075 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
5076 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
5077 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
5078 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
5079 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
5080 {
5081 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
5082 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
5083 }
5084 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
5085 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
5086 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
5087 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
5088 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
5089 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
5090 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
5091 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
5092 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
5093 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
5094 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
5095 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
5096 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
5097 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
5098 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
5099 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
5100 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5101 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
5102 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5103 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
5104 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
5105 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
5106 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
5107 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
5108 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
5109 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
5110 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
5111 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
5112 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
5113            Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW               %#RHr\n", uHCReg));
5114 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
5115 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
5116 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
5117 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5118 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
5119 {
5120 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
5121 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
5122 }
5123
5124 /* Guest bits. */
5125 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
5126 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
5127 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
5128 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
5129 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
5130 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
5131 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
5132 {
5133 rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val); AssertRC(rc);
5134 Log4(("VMX_VMCS16_VPID %u\n", u32Val));
5135 }
5136
5137 /* Host bits. */
5138 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
5139 Log4(("Host CR0 %#RHr\n", uHCReg));
5140 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
5141 Log4(("Host CR3 %#RHr\n", uHCReg));
5142 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
5143 Log4(("Host CR4 %#RHr\n", uHCReg));
5144
5145 RTGDTR HostGdtr;
5146 PCX86DESCHC pDesc;
5147 ASMGetGDTR(&HostGdtr);
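            /* Fetch the host GDTR so the host selectors read below can be resolved to their descriptors and dumped. */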
5148 rc = VMXReadVmcs32(VMX_VMCS16_HOST_CS_SEL, &u32Val); AssertRC(rc);
5149 Log4(("Host CS %#08x\n", u32Val));
5150 if (u32Val < HostGdtr.cbGdt)
5151 {
5152 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5153 hmR0DumpDescriptor(pDesc, u32Val, "CS: ");
5154 }
5155
5156 rc = VMXReadVmcs32(VMX_VMCS16_HOST_DS_SEL, &u32Val); AssertRC(rc);
5157 Log4(("Host DS %#08x\n", u32Val));
5158 if (u32Val < HostGdtr.cbGdt)
5159 {
5160 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5161 hmR0DumpDescriptor(pDesc, u32Val, "DS: ");
5162 }
5163
5164 rc = VMXReadVmcs32(VMX_VMCS16_HOST_ES_SEL, &u32Val); AssertRC(rc);
5165 Log4(("Host ES %#08x\n", u32Val));
5166 if (u32Val < HostGdtr.cbGdt)
5167 {
5168 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5169 hmR0DumpDescriptor(pDesc, u32Val, "ES: ");
5170 }
5171
5172 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FS_SEL, &u32Val); AssertRC(rc);
5173 Log4(("Host FS %#08x\n", u32Val));
5174 if (u32Val < HostGdtr.cbGdt)
5175 {
5176 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5177 hmR0DumpDescriptor(pDesc, u32Val, "FS: ");
5178 }
5179
5180 rc = VMXReadVmcs32(VMX_VMCS16_HOST_GS_SEL, &u32Val); AssertRC(rc);
5181 Log4(("Host GS %#08x\n", u32Val));
5182 if (u32Val < HostGdtr.cbGdt)
5183 {
5184 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5185 hmR0DumpDescriptor(pDesc, u32Val, "GS: ");
5186 }
5187
5188 rc = VMXReadVmcs32(VMX_VMCS16_HOST_SS_SEL, &u32Val); AssertRC(rc);
5189 Log4(("Host SS %#08x\n", u32Val));
5190 if (u32Val < HostGdtr.cbGdt)
5191 {
5192 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5193 hmR0DumpDescriptor(pDesc, u32Val, "SS: ");
5194 }
5195
5196 rc = VMXReadVmcs32(VMX_VMCS16_HOST_TR_SEL, &u32Val); AssertRC(rc);
5197 Log4(("Host TR %#08x\n", u32Val));
5198 if (u32Val < HostGdtr.cbGdt)
5199 {
5200 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5201 hmR0DumpDescriptor(pDesc, u32Val, "TR: ");
5202 }
5203
5204 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5205 Log4(("Host TR Base %#RHv\n", uHCReg));
5206 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5207 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5208 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5209 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5210 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5211 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5212 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5213 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5214 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5215 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5216 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5217 Log4(("Host RSP %#RHv\n", uHCReg));
5218 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5219 Log4(("Host RIP %#RHv\n", uHCReg));
5220# if HC_ARCH_BITS == 64
5221 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5222 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5223 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5224 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5225 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5226 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5227# endif
5228#endif /* VBOX_STRICT */
5229 break;
5230 }
5231
5232 default:
5233 /* Impossible */
5234 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5235 break;
5236 }
5237 NOREF(pCtx);
5238}
5239
5240
5241#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
5242#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5243# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5244#endif
5245#ifdef VBOX_STRICT
5246static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5247{
5248 switch (idxField)
5249 {
5250 case VMX_VMCS_GUEST_RIP:
5251 case VMX_VMCS_GUEST_RSP:
5252 case VMX_VMCS_GUEST_SYSENTER_EIP:
5253 case VMX_VMCS_GUEST_SYSENTER_ESP:
5254 case VMX_VMCS_GUEST_GDTR_BASE:
5255 case VMX_VMCS_GUEST_IDTR_BASE:
5256 case VMX_VMCS_GUEST_CS_BASE:
5257 case VMX_VMCS_GUEST_DS_BASE:
5258 case VMX_VMCS_GUEST_ES_BASE:
5259 case VMX_VMCS_GUEST_FS_BASE:
5260 case VMX_VMCS_GUEST_GS_BASE:
5261 case VMX_VMCS_GUEST_SS_BASE:
5262 case VMX_VMCS_GUEST_LDTR_BASE:
5263 case VMX_VMCS_GUEST_TR_BASE:
5264 case VMX_VMCS_GUEST_CR3:
5265 return true;
5266 }
5267 return false;
5268}
5269
5270static bool hmR0VmxIsValidReadField(uint32_t idxField)
5271{
5272 switch (idxField)
5273 {
5274 /* Read-only fields. */
5275 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5276 return true;
5277 }
5278 /* Remaining readable fields should also be writable. */
5279 return hmR0VmxIsValidWriteField(idxField);
5280}
5281#endif /* VBOX_STRICT */
5282
5283
5284/**
5285 * Executes the specified handler in 64-bit mode.
5286 *
5287 * @returns VBox status code (no informational status codes).
5288 * @param pVCpu The cross context virtual CPU structure.
5289 * @param enmOp The operation to perform.
5290 * @param cParams Number of parameters.
5291 * @param paParam Array of 32-bit parameters.
5292 */
5293VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
5294{
5295 PVM pVM = pVCpu->CTX_SUFF(pVM);
5296 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5297 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5298 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5299 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5300
5301#ifdef VBOX_STRICT
5302 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5303 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5304
5305    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5306 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5307#endif
5308
5309 /* Disable interrupts. */
5310 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
5311
5312#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5313 RTCPUID idHostCpu = RTMpCpuId();
5314 CPUMR0SetLApic(pVCpu, idHostCpu);
5315#endif
5316
5317 PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
5318 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5319
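    /* The 32-to-64 switcher runs outside VMX root mode, so deactivate the current VMCS and leave root mode here;
       both are restored after the call below. */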
5320 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
5321 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5322 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
5323
5324 /* Leave VMX Root Mode. */
5325 VMXDisable();
5326
5327 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5328
5329 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5330 CPUMSetHyperEIP(pVCpu, enmOp);
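    /* Parameters are pushed in reverse so that paParam[0] ends up on top of the hypervisor stack for the 64-bit handler. */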
5331 for (int i = (int)cParams - 1; i >= 0; i--)
5332 CPUMPushHyper(pVCpu, paParam[i]);
5333
5334 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5335
5336 /* Call the switcher. */
5337 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5338 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5339
5340 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5341 /* Make sure the VMX instructions don't cause #UD faults. */
5342 SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
5343
5344 /* Re-enter VMX Root Mode */
5345 int rc2 = VMXEnable(HCPhysCpuPage);
5346 if (RT_FAILURE(rc2))
5347 {
5348 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5349 ASMSetFlags(fOldEFlags);
5350 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
5351 return rc2;
5352 }
5353
5354 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5355 AssertRC(rc2);
5356 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
5357 Assert(!(ASMGetFlags() & X86_EFL_IF));
5358 ASMSetFlags(fOldEFlags);
5359 return rc;
5360}
5361
5362
5363/**
5364 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5365 * supporting 64-bit guests.
5366 *
5367 * @returns VBox status code.
5368 * @param fResume Whether to VMLAUNCH or VMRESUME.
5369 * @param pCtx Pointer to the guest-CPU context.
5370 * @param pCache Pointer to the VMCS cache.
5371 * @param pVM The cross context VM structure.
5372 * @param pVCpu The cross context virtual CPU structure.
5373 */
5374DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5375{
5376 NOREF(fResume);
5377
5378 PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
5379 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5380
5381#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5382 pCache->uPos = 1;
5383 pCache->interPD = PGMGetInterPaeCR3(pVM);
5384 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5385#endif
5386
5387#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5388 pCache->TestIn.HCPhysCpuPage = 0;
5389 pCache->TestIn.HCPhysVmcs = 0;
5390 pCache->TestIn.pCache = 0;
5391 pCache->TestOut.HCPhysVmcs = 0;
5392 pCache->TestOut.pCache = 0;
5393 pCache->TestOut.pCtx = 0;
5394 pCache->TestOut.eflags = 0;
5395#else
5396 NOREF(pCache);
5397#endif
5398
5399 uint32_t aParam[10];
5400 aParam[0] = RT_LO_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5401 aParam[1] = RT_HI_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Hi. */
5402 aParam[2] = RT_LO_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5403 aParam[3] = RT_HI_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Hi. */
5404 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5405 aParam[5] = 0;
5406 aParam[6] = VM_RC_ADDR(pVM, pVM);
5407 aParam[7] = 0;
5408 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5409 aParam[9] = 0;
5410
5411#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5412 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5413 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5414#endif
5415 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5416
5417#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5418 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5419 Assert(pCtx->dr[4] == 10);
5420 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5421#endif
5422
5423#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5424 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5425 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5426 pVCpu->hm.s.vmx.HCPhysVmcs));
5427 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5428 pCache->TestOut.HCPhysVmcs));
5429 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5430 pCache->TestOut.pCache));
5431 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5432 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5433 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5434 pCache->TestOut.pCtx));
5435 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5436#endif
5437 NOREF(pCtx);
5438 return rc;
5439}
5440
5441
5442 * Initializes the VMCS read cache.
5443 * Initialize the VMCS-Read cache.
5444 *
5445 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
5446 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
5447 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
5448 * (those that have a 32-bit FULL & HIGH part).
5449 *
5450 * @returns VBox status code.
5451 * @param pVCpu The cross context virtual CPU structure.
5452 */
5453static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
5454{
5455#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5456 do { \
5457 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5458 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5459 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5460 ++cReadFields; \
5461 } while (0)
5462
5463 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5464 uint32_t cReadFields = 0;
5465
5466 /*
5467 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5468 * and serve to indicate exceptions to the rules.
5469 */
5470
5471 /* Guest-natural selector base fields. */
5472#if 0
5473 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5474 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5475 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5476#endif
5477 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5478 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5479 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5480 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5481 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5482 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5483 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5484 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5485 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5486 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5487 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5488 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5489#if 0
5490 /* Unused natural width guest-state fields. */
5491 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5492 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5493#endif
5494 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5495 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5496
5497 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for
5498 these 64-bit fields (using "FULL" and "HIGH" fields). */
5499#if 0
5500 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5501 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5502 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5503 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5504 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5505 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5506 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5507 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5508 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5509#endif
5510
5511 /* Natural width guest-state fields. */
5512 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5513#if 0
5514 /* Currently unused field. */
5515 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5516#endif
5517
5518 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
5519 {
5520 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5521 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5522 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5523 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5524 }
5525 else
5526 {
5527 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5528 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5529 }
5530
5531#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5532 return VINF_SUCCESS;
5533}
5534
5535
5536/**
5537 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5538 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5539 * darwin, running 64-bit guests).
5540 *
5541 * @returns VBox status code.
5542 * @param pVCpu The cross context virtual CPU structure.
5543 * @param idxField The VMCS field encoding.
5544 * @param u64Val 16, 32 or 64-bit value.
5545 */
5546VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5547{
5548 int rc;
5549 switch (idxField)
5550 {
5551 /*
5552         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5553 */
5554 /* 64-bit Control fields. */
5555 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5556 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5557 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5558 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5559 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5560 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5561 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5562 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5563 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5564 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5565 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5566 case VMX_VMCS64_CTRL_EPTP_FULL:
5567 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5568 /* 64-bit Guest-state fields. */
5569 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5570 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5571 case VMX_VMCS64_GUEST_PAT_FULL:
5572 case VMX_VMCS64_GUEST_EFER_FULL:
5573 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5574 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5575 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5576 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5577 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5578 /* 64-bit Host-state fields. */
5579 case VMX_VMCS64_HOST_PAT_FULL:
5580 case VMX_VMCS64_HOST_EFER_FULL:
5581 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5582 {
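            /* The "HIGH" half of a 64-bit VMCS field is encoded as the "FULL" field encoding plus one,
               hence the two 32-bit VMWRITEs below. */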
5583 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
5584 rc |= VMXWriteVmcs32(idxField + 1, RT_HI_U32(u64Val));
5585 break;
5586 }
5587
5588 /*
5589 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5590 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
5591 */
5592 /* Natural-width Guest-state fields. */
5593 case VMX_VMCS_GUEST_CR3:
5594 case VMX_VMCS_GUEST_ES_BASE:
5595 case VMX_VMCS_GUEST_CS_BASE:
5596 case VMX_VMCS_GUEST_SS_BASE:
5597 case VMX_VMCS_GUEST_DS_BASE:
5598 case VMX_VMCS_GUEST_FS_BASE:
5599 case VMX_VMCS_GUEST_GS_BASE:
5600 case VMX_VMCS_GUEST_LDTR_BASE:
5601 case VMX_VMCS_GUEST_TR_BASE:
5602 case VMX_VMCS_GUEST_GDTR_BASE:
5603 case VMX_VMCS_GUEST_IDTR_BASE:
5604 case VMX_VMCS_GUEST_RSP:
5605 case VMX_VMCS_GUEST_RIP:
5606 case VMX_VMCS_GUEST_SYSENTER_ESP:
5607 case VMX_VMCS_GUEST_SYSENTER_EIP:
5608 {
5609 if (!(RT_HI_U32(u64Val)))
5610 {
5611 /* If this field is 64-bit, VT-x will zero out the top bits. */
5612 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
5613 }
5614 else
5615 {
5616 /* Assert that only the 32->64 switcher case should ever come here. */
5617 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5618 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5619 }
5620 break;
5621 }
5622
5623 default:
5624 {
5625 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5626 rc = VERR_INVALID_PARAMETER;
5627 break;
5628 }
5629 }
5630 AssertRCReturn(rc, rc);
5631 return rc;
5632}
5633
5634
5635/**
5636 * Queues up a VMWRITE by using the VMCS write cache.
5637 * This is only used on 32-bit hosts (except darwin) for 64-bit guests.
5638 *
 * @returns VBox status code.
5639 * @param pVCpu The cross context virtual CPU structure.
5640 * @param idxField The VMCS field encoding.
5641 * @param u64Val 16, 32 or 64-bit value.
5642 */
5643VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5644{
5645 AssertPtr(pVCpu);
5646 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5647
5648 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5649 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5650
5651 /* Make sure there are no duplicates. */
5652 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5653 {
5654 if (pCache->Write.aField[i] == idxField)
5655 {
5656 pCache->Write.aFieldVal[i] = u64Val;
5657 return VINF_SUCCESS;
5658 }
5659 }
5660
5661 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5662 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5663 pCache->Write.cValidEntries++;
5664 return VINF_SUCCESS;
5665}
5666#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
5667
5668
5669/**
5670 * Sets up the usage of TSC-offsetting and updates the VMCS.
5671 *
5672 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
5673 * VMX preemption timer.
5674 *
5676 * @param pVCpu The cross context virtual CPU structure.
5677 *
5678 * @remarks No-long-jump zone!!!
5679 */
5680static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
5681{
5682 bool fOffsettedTsc;
5683 bool fParavirtTsc;
5684 PVM pVM = pVCpu->CTX_SUFF(pVM);
5685 uint64_t uTscOffset;
5686 if (pVM->hm.s.vmx.fUsePreemptTimer)
5687 {
5688 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
5689
5690 /* Make sure the returned values have sane upper and lower boundaries. */
5691 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5692 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5693 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
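        /* The VMX-preemption timer counts down at the TSC rate divided by 2^cPreemptTimerShift (see MSR
           IA32_VMX_MISC), so scale the TSC tick count into timer units. */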
5694 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5695
5696 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5697 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
5698 AssertRC(rc);
5699 }
5700 else
5701 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
5702
5703 /** @todo later optimize this to be done elsewhere and not before every
5704 * VM-entry. */
5705 if (fParavirtTsc)
5706 {
5707        /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
5708           information before every VM-entry, hence we disable this for performance reasons. */
5709#if 0
5710 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5711 AssertRC(rc);
5712#endif
5713 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5714 }
5715
5716 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
5717 if ( fOffsettedTsc
5718 && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
5719 {
5720 if (pVCpu->hm.s.vmx.u64TscOffset != uTscOffset)
5721 {
5722 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
5723 AssertRC(rc);
5724 pVCpu->hm.s.vmx.u64TscOffset = uTscOffset;
5725 }
5726
5727 if (uProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)
5728 {
5729 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5730 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5731 AssertRC(rc);
5732 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
5733 }
5734 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5735 }
5736 else
5737 {
5738 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5739 if (!(uProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
5740 {
5741 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5742 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5743 AssertRC(rc);
5744 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
5745 }
5746 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5747 }
5748}
5749
5750
5751/**
5752 * Gets the IEM exception flags for the specified vector and IDT vectoring /
5753 * VM-exit interruption info type.
5754 *
5755 * @returns The IEM exception flags.
5756 * @param uVector The event vector.
5757 * @param uVmxVectorType The VMX event type.
5758 *
5759 * @remarks This function currently only constructs flags required for
5760 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g. error-code
5761 * and CR2 aspects of an exception are not included).
5762 */
5763static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxVectorType)
5764{
5765 uint32_t fIemXcptFlags;
5766 switch (uVmxVectorType)
5767 {
5768 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5769 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5770 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
5771 break;
5772
5773 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5774 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
5775 break;
5776
5777 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5778 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
5779 break;
5780
5781 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
5782 {
5783 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5784 if (uVector == X86_XCPT_BP)
5785 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
5786 else if (uVector == X86_XCPT_OF)
5787 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
5788 else
5789 {
5790 fIemXcptFlags = 0;
5791 AssertMsgFailed(("Unexpected vector for software int. uVector=%#x", uVector));
5792 }
5793 break;
5794 }
5795
5796 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5797 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5798 break;
5799
5800 default:
5801 fIemXcptFlags = 0;
5802 AssertMsgFailed(("Unexpected vector type! uVmxVectorType=%#x uVector=%#x", uVmxVectorType, uVector));
5803 break;
5804 }
5805 return fIemXcptFlags;
5806}
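
/*
 * For instance, from the mapping above:
 *
 *     hmR0VmxGetIemXcptFlags(X86_XCPT_BP, VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT)
 *         -> IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR
 *     hmR0VmxGetIemXcptFlags(X86_XCPT_PF, VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
 *         -> IEM_XCPT_FLAGS_T_CPU_XCPT
 *
 * These flags are what hmR0VmxCheckExitDueToEventDelivery() feeds into
 * IEMEvaluateRecursiveXcpt() further down.
 */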
5807
5808
5809/**
5810 * Sets an event as a pending event to be injected into the guest.
5811 *
5812 * @param pVCpu The cross context virtual CPU structure.
5813 * @param u32IntInfo The VM-entry interruption-information field.
5814 * @param cbInstr The VM-entry instruction length in bytes (for software
5815 * interrupts, exceptions and privileged software
5816 * exceptions).
5817 * @param u32ErrCode The VM-entry exception error code.
5818 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5819 * page-fault.
5820 *
5821 * @remarks Statistics counter assumes this is a guest event being injected or
5822 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5823 * always incremented.
5824 */
5825DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5826 RTGCUINTPTR GCPtrFaultAddress)
5827{
5828 Assert(!pVCpu->hm.s.Event.fPending);
5829 pVCpu->hm.s.Event.fPending = true;
5830 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5831 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5832 pVCpu->hm.s.Event.cbInstr = cbInstr;
5833 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5834}
5835
5836
5837/**
5838 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
5839 *
5840 * @param pVCpu The cross context virtual CPU structure.
5841 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5842 * out-of-sync. Make sure to update the required fields
5843 * before using them.
5844 */
5845DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5846{
5847 NOREF(pMixedCtx);
5848 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5849 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5850 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5851 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5852}
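
/*
 * For reference, the interruption-information built above follows the VM-entry event
 * injection format (Intel spec. "VM-Entry Controls for Event Injection": bits 7:0
 * vector, bits 10:8 type, bit 11 deliver-error-code, bit 31 valid), so the #DF ends
 * up encoded as:
 *
 *     0x08 (vector) | 3 << 8 (HW xcpt) | RT_BIT(11) | RT_BIT(31) = 0x80000B08
 *
 * The error code itself (0 for this #DF) is written to the separate VM-entry
 * exception error-code VMCS field at injection time, not packed into this word.
 */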
5853
5854
5855/**
5856 * Handles a condition that occurred while delivering an event through the guest
5857 * IDT.
5858 *
5859 * @returns Strict VBox status code (i.e. informational status codes too).
5860 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5861 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5862 * to continue execution of the guest, which will deliver the \#DF.
5863 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5864 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5865 *
5866 * @param pVCpu The cross context virtual CPU structure.
5867 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5868 * out-of-sync. Make sure to update the required fields
5869 * before using them.
5870 * @param pVmxTransient Pointer to the VMX transient structure.
5871 *
5872 * @remarks No-long-jump zone!!!
5873 */
5874static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5875{
5876 uint32_t const uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5877
5878 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5879 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5880 AssertRCReturn(rc2, rc2);
5881
5882 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5883 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5884 {
5885 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5886 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5887
5888 /*
5889 * If the event was a software interrupt (generated with INT n) or a software exception
5890 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5891 * can handle the VM-exit and continue guest execution which will re-execute the
5892 * instruction rather than re-injecting the exception, as that can cause premature
5893 * trips to ring-3 before injection and involve TRPM which currently has no way of
5894 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5895 * the problem).
5896 */
5897 IEMXCPTRAISE enmRaise;
5898 IEMXCPTRAISEINFO fRaiseInfo;
5899 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5900 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5901 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5902 {
5903 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5904 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5905 }
5906 else if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5907 {
5908 uint32_t const uExitVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uExitIntInfo);
5909 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
5910 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
5911 /** @todo Make AssertMsgReturn as just AssertMsg later. */
5912 AssertMsgReturn(uExitVectorType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT,
5913 ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. %#x!\n",
5914 uExitVectorType), VERR_VMX_IPE_5);
5915
5916 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5917
5918 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
5919 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5920 {
5921 pVmxTransient->fVectoringPF = true;
5922 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5923 }
5924 }
5925 else
5926 {
5927 /*
5928 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5929 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5930 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5931 */
5932 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5933 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5934 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5935 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5936 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5937 }
5938
5939 /*
5940 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5941 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5942 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5943 * subsequent VM-entry would fail.
5944 *
5945 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5946 */
5947 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
5948 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5949 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT
5950 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
5951 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5952 {
5953 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5954 }
5955
5956 switch (enmRaise)
5957 {
5958 case IEMXCPTRAISE_CURRENT_XCPT:
5959 {
5960 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
5961 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
5962 Assert(rcStrict == VINF_SUCCESS);
5963 break;
5964 }
5965
5966 case IEMXCPTRAISE_PREV_EVENT:
5967 {
5968 uint32_t u32ErrCode;
5969 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5970 {
5971 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5972 AssertRCReturn(rc2, rc2);
5973 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5974 }
5975 else
5976 u32ErrCode = 0;
5977
5978 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
5979 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5980 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5981 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5982
5983 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
5984 pVCpu->hm.s.Event.u32ErrCode));
5985 Assert(rcStrict == VINF_SUCCESS);
5986 break;
5987 }
5988
5989 case IEMXCPTRAISE_REEXEC_INSTR:
5990 Assert(rcStrict == VINF_SUCCESS);
5991 break;
5992
5993 case IEMXCPTRAISE_DOUBLE_FAULT:
5994 {
5995 /*
5996 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5997 * second #PF as a guest #PF (and not a shadow #PF), which then needs to be converted into a #DF.
5998 */
5999 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6000 {
6001 pVmxTransient->fVectoringDoublePF = true;
6002 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
6003 pMixedCtx->cr2));
6004 rcStrict = VINF_SUCCESS;
6005 }
6006 else
6007 {
6008 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
6009 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
6010 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
6011 uIdtVector, uExitVector));
6012 rcStrict = VINF_HM_DOUBLE_FAULT;
6013 }
6014 break;
6015 }
6016
6017 case IEMXCPTRAISE_TRIPLE_FAULT:
6018 {
6019 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
6020 rcStrict = VINF_EM_RESET;
6021 break;
6022 }
6023
6024 case IEMXCPTRAISE_CPU_HANG:
6025 {
6026 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6027 rcStrict = VERR_EM_GUEST_CPU_HANG;
6028 break;
6029 }
6030
6031 default:
6032 {
6033 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6034 rcStrict = VERR_VMX_IPE_2;
6035 break;
6036 }
6037 }
6038 }
6039 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
6040 && VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
6041 && uExitVector != X86_XCPT_DF
6042 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
6043 {
6044 /*
6045 * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
6046 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
6047 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
6048 */
6049 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6050 {
6051 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
6052 VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
6053 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6054 }
6055 }
6056
6057 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6058 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6059 return rcStrict;
6060}
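
/*
 * Concrete example of the flow above: a #PF VM-exit raised while a #PF was being
 * delivered (IDT-vectoring type HW xcpt, vector 14). IEMEvaluateRecursiveXcpt() will
 * typically classify this as IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF;
 * we then only record fVectoringDoublePF and return VINF_SUCCESS so the #PF exit
 * handler can first let PGM decide whether the second #PF is a shadow #PF it can
 * resolve. Only if it turns out to be a genuine guest #PF is it converted to a #DF.
 */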
6061
6062
6063/**
6064 * Imports a guest segment register from the current VMCS into
6065 * the guest-CPU context.
6066 *
6067 * @returns VBox status code.
6068 * @param pVCpu The cross context virtual CPU structure.
6069 * @param idxSel Index of the selector in the VMCS.
6070 * @param idxLimit Index of the segment limit in the VMCS.
6071 * @param idxBase Index of the segment base in the VMCS.
6072 * @param idxAccess Index of the access rights of the segment in the VMCS.
6073 * @param pSelReg Pointer to the segment selector.
6074 *
6075 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
6076 * do not log!
6077 *
6078 * @remarks Never call this function directly!!! Use the
6079 * HMVMX_IMPORT_SREG() macro as that takes care
6080 * of whether to read from the VMCS cache or not.
6081 */
6082static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6083 PCPUMSELREG pSelReg)
6084{
6085 NOREF(pVCpu);
6086
6087 uint32_t u32Sel;
6088 uint32_t u32Limit;
6089 uint32_t u32Attr;
6090 uint64_t u64Base;
6091 int rc = VMXReadVmcs32(idxSel, &u32Sel);
6092 rc |= VMXReadVmcs32(idxLimit, &u32Limit);
6093 rc |= VMXReadVmcs32(idxAccess, &u32Attr);
6094 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
6095 AssertRCReturn(rc, rc);
6096
6097 pSelReg->Sel = (uint16_t)u32Sel;
6098 pSelReg->ValidSel = (uint16_t)u32Sel;
6099 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6100 pSelReg->u32Limit = u32Limit;
6101 pSelReg->u64Base = u64Base;
6102 pSelReg->Attr.u = u32Attr;
6103
6104 /*
6105 * If VT-x marks the segment as unusable, most other bits remain undefined:
6106 * - For CS the L, D and G bits have meaning.
6107 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6108 * - For the remaining data segments no bits are defined.
6109 *
6110 * The present bit and the unusable bit have been observed to be set at the
6111 * same time (the selector was supposed to be invalid as we started executing
6112 * a V8086 interrupt in ring-0).
6113 *
6114 * What should be important for the rest of the VBox code, is that the P bit is
6115 * cleared. Some of the other VBox code recognizes the unusable bit, but
6116 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
6117 * safe side here, we'll strip off P and other bits we don't care about. If
6118 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6119 *
6120 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6121 */
6122 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6123 {
6124 Assert(idxSel != VMX_VMCS16_GUEST_TR_SEL); /* TR is the only selector that can never be unusable. */
6125
6126 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6127 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6128 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6129#ifdef VBOX_STRICT
6130 VMMRZCallRing3Disable(pVCpu);
6131 Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Sel, pSelReg->Attr.u));
6132# ifdef DEBUG_bird
6133 AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u,
6134 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6135 idxSel, u32Sel, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6136# endif
6137 VMMRZCallRing3Enable(pVCpu);
6138#endif
6139 }
6140 return VINF_SUCCESS;
6141}
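
/*
 * The HMVMX_IMPORT_SREG() macro used by hmR0VmxImportGuestState() below presumably
 * just pastes the segment name into the four VMCS field indices, roughly along these
 * lines (sketch only; the real macro is defined earlier in this file, also deals with
 * the VMCS cache, and the exact field-name spelling may differ):
 *
 *     #define HMVMX_IMPORT_SREG(Sel, a_pSelReg) \
 *         hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, \
 *                                      VMX_VMCS32_GUEST_##Sel##_LIMIT, \
 *                                      VMX_VMCS_GUEST_##Sel##_BASE, \
 *                                      VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pSelReg))
 */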
6142
6143
6144/**
6145 * Imports the guest RIP from the VMCS back into the guest-CPU context.
6146 *
6147 * @returns VBox status code.
6148 * @param pVCpu The cross context virtual CPU structure.
6149 *
6150 * @remarks Called with interrupts and/or preemption disabled, should not assert!
6151 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6152 * instead!!!
6153 */
6154DECLINLINE(int) hmR0VmxImportGuestRip(PVMCPU pVCpu)
6155{
6156 uint64_t u64Val;
6157 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6158 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
6159 {
6160 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
6161 if (RT_SUCCESS(rc))
6162 {
6163 pCtx->rip = u64Val;
6164 EMR0HistoryUpdatePC(pVCpu, pCtx->rip, false);
6165 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
6166 }
6167 return rc;
6168 }
6169 return VINF_SUCCESS;
6170}
6171
6172
6173/**
6174 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
6175 *
6176 * @returns VBox status code.
6177 * @param pVCpu The cross context virtual CPU structure.
6178 *
6179 * @remarks Called with interrupts and/or preemption disabled, should not assert!
6180 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6181 * instead!!!
6182 */
6183DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu)
6184{
6185 uint32_t u32Val;
6186 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6187 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
6188 {
6189 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);
6190 if (RT_SUCCESS(rc))
6191 {
6192 pCtx->eflags.u32 = u32Val;
6193
6194 /* Restore eflags for real-on-v86-mode hack. */
6195 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6196 {
6197 pCtx->eflags.Bits.u1VM = 0;
6198 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6199 }
6200 }
6201 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
6202 return rc;
6203 }
6204 return VINF_SUCCESS;
6205}
6206
6207
6208/**
6209 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
6210 * context.
6211 *
6212 * @returns VBox status code.
6213 * @param pVCpu The cross context virtual CPU structure.
6214 *
6215 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
6216 * do not log!
6217 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6218 * instead!!!
6219 */
6220DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu)
6221{
6222 uint32_t u32Val;
6223 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6224 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val);
6225 if (RT_SUCCESS(rc))
6226 {
6227 /*
6228 * We additionally need to import RIP and RFLAGS here, as they may be required
6229 * by hmR0VmxEvaluatePendingEvent().
6230 */
6231 if (!u32Val)
6232 {
6233 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6234 {
6235 rc = hmR0VmxImportGuestRip(pVCpu);
6236 rc |= hmR0VmxImportGuestRFlags(pVCpu);
6237 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6238 }
6239
6240 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6241 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6242 }
6243 else
6244 {
6245 rc = hmR0VmxImportGuestRip(pVCpu);
6246 rc |= hmR0VmxImportGuestRFlags(pVCpu);
6247
6248 if (u32Val & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
6249 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
6250 {
6251 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
6252 }
6253 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6254 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6255
6256 if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6257 {
6258 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6259 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6260 }
6261 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6262 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6263 }
6264 }
6265 return rc;
6266}
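
/*
 * For orientation, the interruptibility-state bits tested above use the Intel-defined
 * layout:
 *
 *     bit 0  blocking by STI      -> EMSetInhibitInterruptsPC(pVCpu, pCtx->rip)
 *     bit 1  blocking by MOV SS   -> EMSetInhibitInterruptsPC(pVCpu, pCtx->rip)
 *     bit 2  blocking by SMI      (not handled here)
 *     bit 3  blocking by NMI      -> VMCPU_FF_BLOCK_NMIS
 *
 * so a u32Val of 0 simply clears any stale interrupt-inhibit / block-NMI force-flags.
 */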
6267
6268
6269/**
6270 * Worker for VMXR0ImportStateOnDemand.
6271 *
6272 * @returns VBox status code.
6273 * @param pVCpu The cross context virtual CPU structure.
6274 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
6275 */
6276static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat)
6277{
6278#define VMXLOCAL_BREAK_RC(a_rc) \
6279 if (RT_FAILURE(a_rc)) \
6280 break
6281
6282 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
6283
6284 int rc = VINF_SUCCESS;
6285 PVM pVM = pVCpu->CTX_SUFF(pVM);
6286 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6287 uint64_t u64Val;
6288 uint32_t u32Val;
6289
6290 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
6291
6292 /*
6293 * We disable interrupts to make the updating of the state and in particular
6294 * the fExtrn modification atomic with respect to preemption hooks.
6295 */
6296 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
6297
6298 fWhat &= pCtx->fExtrn;
6299 if (fWhat)
6300 {
6301 do
6302 {
6303 if (fWhat & CPUMCTX_EXTRN_RIP)
6304 {
6305 rc = hmR0VmxImportGuestRip(pVCpu);
6306 VMXLOCAL_BREAK_RC(rc);
6307 }
6308
6309 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
6310 {
6311 rc = hmR0VmxImportGuestRFlags(pVCpu);
6312 VMXLOCAL_BREAK_RC(rc);
6313 }
6314
6315 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
6316 {
6317 rc = hmR0VmxImportGuestIntrState(pVCpu);
6318 VMXLOCAL_BREAK_RC(rc);
6319 }
6320
6321 if (fWhat & CPUMCTX_EXTRN_RSP)
6322 {
6323 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
6324 VMXLOCAL_BREAK_RC(rc);
6325 pCtx->rsp = u64Val;
6326 }
6327
6328 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
6329 {
6330 if (fWhat & CPUMCTX_EXTRN_CS)
6331 {
6332 rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs);
6333 rc |= hmR0VmxImportGuestRip(pVCpu);
6334 VMXLOCAL_BREAK_RC(rc);
6335 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6336 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6337 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true);
6338 }
6339 if (fWhat & CPUMCTX_EXTRN_SS)
6340 {
6341 rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss);
6342 VMXLOCAL_BREAK_RC(rc);
6343 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6344 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6345 }
6346 if (fWhat & CPUMCTX_EXTRN_DS)
6347 {
6348 rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds);
6349 VMXLOCAL_BREAK_RC(rc);
6350 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6351 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6352 }
6353 if (fWhat & CPUMCTX_EXTRN_ES)
6354 {
6355 rc = HMVMX_IMPORT_SREG(ES, &pCtx->es);
6356 VMXLOCAL_BREAK_RC(rc);
6357 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6358 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6359 }
6360 if (fWhat & CPUMCTX_EXTRN_FS)
6361 {
6362 rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs);
6363 VMXLOCAL_BREAK_RC(rc);
6364 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6365 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6366 }
6367 if (fWhat & CPUMCTX_EXTRN_GS)
6368 {
6369 rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs);
6370 VMXLOCAL_BREAK_RC(rc);
6371 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6372 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6373 }
6374 }
6375
6376 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
6377 {
6378 if (fWhat & CPUMCTX_EXTRN_LDTR)
6379 {
6380 rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr);
6381 VMXLOCAL_BREAK_RC(rc);
6382 }
6383
6384 if (fWhat & CPUMCTX_EXTRN_GDTR)
6385 {
6386 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
6387 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
6388 VMXLOCAL_BREAK_RC(rc);
6389 pCtx->gdtr.pGdt = u64Val;
6390 pCtx->gdtr.cbGdt = u32Val;
6391 }
6392
6393 /* Guest IDTR. */
6394 if (fWhat & CPUMCTX_EXTRN_IDTR)
6395 {
6396 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
6397 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
6398 VMXLOCAL_BREAK_RC(rc);
6399 pCtx->idtr.pIdt = u64Val;
6400 pCtx->idtr.cbIdt = u32Val;
6401 }
6402
6403 /* Guest TR. */
6404 if (fWhat & CPUMCTX_EXTRN_TR)
6405 {
6406 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */
6407 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6408 {
6409 rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr);
6410 VMXLOCAL_BREAK_RC(rc);
6411 }
6412 }
6413 }
6414
6415 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
6416 {
6417 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
6418 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
6419 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);
6420 pCtx->SysEnter.cs = u32Val;
6421 VMXLOCAL_BREAK_RC(rc);
6422 }
6423
6424#if HC_ARCH_BITS == 64
6425 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
6426 {
6427 if ( pVM->hm.s.fAllow64BitGuests
6428 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
6429 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
6430 }
6431
6432 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
6433 {
6434 if ( pVM->hm.s.fAllow64BitGuests
6435 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
6436 {
6437 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
6438 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
6439 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
6440 }
6441 }
6442#endif
6443
6444 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
6445#if HC_ARCH_BITS == 32
6446 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
6447#endif
6448 )
6449 {
6450 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6451 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
6452 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6453 {
6454 switch (pMsr->u32Msr)
6455 {
6456#if HC_ARCH_BITS == 32
6457 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break;
6458 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break;
6459 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break;
6460 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6461#endif
6462 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;
6463 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break;
6464 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit */ break;
6465 default:
6466 {
6467 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6468 ASMSetFlags(fEFlags);
6469 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
6470 cMsrs));
6471 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6472 }
6473 }
6474 }
6475 }
6476
6477 if (fWhat & CPUMCTX_EXTRN_DR7)
6478 {
6479 if (!pVCpu->hm.s.fUsingHyperDR7)
6480 {
6481 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6482 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
6483 VMXLOCAL_BREAK_RC(rc);
6484 pCtx->dr[7] = u32Val;
6485 }
6486 }
6487
6488 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
6489 {
6490 uint32_t u32Shadow;
6491 if (fWhat & CPUMCTX_EXTRN_CR0)
6492 {
6493 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
6494 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
6495 VMXLOCAL_BREAK_RC(rc);
6496 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32Cr0Mask)
6497 | (u32Shadow & pVCpu->hm.s.vmx.u32Cr0Mask);
6498 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
6499 CPUMSetGuestCR0(pVCpu, u32Val);
6500 VMMRZCallRing3Enable(pVCpu);
6501 }
6502
6503 if (fWhat & CPUMCTX_EXTRN_CR4)
6504 {
6505 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
6506 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
6507 VMXLOCAL_BREAK_RC(rc);
6508 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32Cr4Mask)
6509 | (u32Shadow & pVCpu->hm.s.vmx.u32Cr4Mask);
6510 CPUMSetGuestCR4(pVCpu, u32Val);
6511 }
6512
6513 if (fWhat & CPUMCTX_EXTRN_CR3)
6514 {
6515 /* CR0.PG bit changes are always intercepted, so it's up to date. */
6516 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6517 || ( pVM->hm.s.fNestedPaging
6518 && CPUMIsGuestPagingEnabledEx(pCtx)))
6519 {
6520 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6521 if (pCtx->cr3 != u64Val)
6522 {
6523 CPUMSetGuestCR3(pVCpu, u64Val);
6524 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6525 }
6526
6527 /* If the guest is in PAE mode, sync back the PDPEs into the guest state.
6528 Note: CR4.PAE, CR0.PG, EFER bit changes are always intercepted, so they're up to date. */
6529 if (CPUMIsGuestInPAEModeEx(pCtx))
6530 {
6531 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
6532 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
6533 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
6534 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
6535 VMXLOCAL_BREAK_RC(rc);
6536 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6537 }
6538 }
6539 }
6540 }
6541 } while (0);
6542
6543 if (RT_SUCCESS(rc))
6544 {
6545 /* Update fExtrn. */
6546 pCtx->fExtrn &= ~fWhat;
6547
6548 /* If everything has been imported, clear the HM keeper bit. */
6549 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
6550 {
6551 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
6552 Assert(!pCtx->fExtrn);
6553 }
6554 }
6555 }
6556 else
6557 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
6558
6559 ASMSetFlags(fEFlags);
6560
6561 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
6562
6563 /*
6564 * Honor any pending CR3 updates.
6565 *
6566 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6567 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6568 * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
6569 *
6570 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
6571 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6572 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6573 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
6574 *
6575 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6576 */
6577 if (VMMRZCallRing3IsEnabled(pVCpu))
6578 {
6579 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6580 {
6581 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
6582 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6583 }
6584
6585 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6586 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6587
6588 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6589 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6590 }
6591
6592 return VINF_SUCCESS;
6593#undef VMXLOCAL_BREAK_RC
6594}
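
/*
 * Example of the fExtrn bookkeeping above: if pCtx->fExtrn still has CPUMCTX_EXTRN_RSP
 * set but CPUMCTX_EXTRN_RIP already cleared, a call asking for RIP | RSP only reads RSP
 * from the VMCS (because of the fWhat &= pCtx->fExtrn filtering). On success the newly
 * imported bits are cleared from fExtrn, and once no HMVMX_CPUMCTX_EXTRN_ALL bits remain
 * the CPUMCTX_EXTRN_KEEPER_HM bit is dropped as well.
 */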
6595
6596
6597/**
6598 * Saves the guest state from the VMCS into the guest-CPU context.
6599 *
6600 * @returns VBox status code.
6601 * @param pVCpu The cross context virtual CPU structure.
6602 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
6603 */
6604VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
6605{
6606 return hmR0VmxImportGuestState(pVCpu, fWhat);
6607}
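
/*
 * Sketch of a typical use from generic ring-0 code that finds bits still marked
 * external in pCtx->fExtrn and needs them, e.g. to look at the guest RIP and CS:
 *
 *     int rc = VMXR0ImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS);
 *     AssertRCReturn(rc, rc);
 *     // pVCpu->cpum.GstCtx.rip and pVCpu->cpum.GstCtx.cs are now valid.
 */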
6608
6609
6610/**
6611 * Check per-VM and per-VCPU force flag actions that require us to go back to
6612 * ring-3 for one reason or another.
6613 *
6614 * @returns Strict VBox status code (i.e. informational status codes too)
6615 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6616 * ring-3.
6617 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6618 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6619 * interrupts)
6620 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6621 * all EMTs to be in ring-3.
6622 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6623 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
6624 * to the EM loop.
6625 *
6626 * @param pVCpu The cross context virtual CPU structure.
6627 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6628 * out-of-sync. Make sure to update the required fields
6629 * before using them.
6630 * @param fStepping Running in hmR0VmxRunGuestCodeStep().
6631 */
6632static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
6633{
6634 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6635
6636 /*
6637 * Anything pending? Should be more likely than not if we're doing a good job.
6638 */
6639 PVM pVM = pVCpu->CTX_SUFF(pVM);
6640 if ( !fStepping
6641 ? !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_MASK)
6642 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
6643 : !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
6644 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6645 return VINF_SUCCESS;
6646
6647 /* Pending PGM CR3 sync. */
6648 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6649 {
6650 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
6651 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6652 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6653 if (rcStrict2 != VINF_SUCCESS)
6654 {
6655 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
6656 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
6657 return rcStrict2;
6658 }
6659 }
6660
6661 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6662 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6663 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6664 {
6665 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6666 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6667 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6668 return rc2;
6669 }
6670
6671 /* Pending VM request packets, such as hardware interrupts. */
6672 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6673 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6674 {
6675 Log4Func(("Pending VM request forcing us back to ring-3\n"));
6676 return VINF_EM_PENDING_REQUEST;
6677 }
6678
6679 /* Pending PGM pool flushes. */
6680 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6681 {
6682 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
6683 return VINF_PGM_POOL_FLUSH_PENDING;
6684 }
6685
6686 /* Pending DMA requests. */
6687 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6688 {
6689 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
6690 return VINF_EM_RAW_TO_R3;
6691 }
6692
6693 return VINF_SUCCESS;
6694}
6695
6696
6697/**
6698 * Converts any TRPM trap into a pending HM event. This is typically used when
6699 * entering from ring-3 (not longjmp returns).
6700 *
6701 * @param pVCpu The cross context virtual CPU structure.
6702 */
6703static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6704{
6705 Assert(TRPMHasTrap(pVCpu));
6706 Assert(!pVCpu->hm.s.Event.fPending);
6707
6708 uint8_t uVector;
6709 TRPMEVENT enmTrpmEvent;
6710 RTGCUINT uErrCode;
6711 RTGCUINTPTR GCPtrFaultAddress;
6712 uint8_t cbInstr;
6713
6714 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6715 AssertRC(rc);
6716
6717 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6718 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6719 if (enmTrpmEvent == TRPM_TRAP)
6720 {
6721 switch (uVector)
6722 {
6723 case X86_XCPT_NMI:
6724 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6725 break;
6726
6727 case X86_XCPT_BP:
6728 case X86_XCPT_OF:
6729 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6730 break;
6731
6732 case X86_XCPT_PF:
6733 case X86_XCPT_DF:
6734 case X86_XCPT_TS:
6735 case X86_XCPT_NP:
6736 case X86_XCPT_SS:
6737 case X86_XCPT_GP:
6738 case X86_XCPT_AC:
6739 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6740 RT_FALL_THRU();
6741 default:
6742 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6743 break;
6744 }
6745 }
6746 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6747 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6748 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6749 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6750 else
6751 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6752
6753 rc = TRPMResetTrap(pVCpu);
6754 AssertRC(rc);
6755 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6756 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6757
6758 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6759}
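
/*
 * Worked example of the conversion above for a guest #PF queued in TRPM:
 *
 *     TRPMQueryTrapAll() -> uVector = 14 (X86_XCPT_PF), enmTrpmEvent = TRPM_TRAP,
 *                           uErrCode = 0x2 (say), GCPtrFaultAddress = faulting address
 *     u32IntInfo         = 0x0E | (3 << 8, HW xcpt) | error-code valid | valid
 *                        = 0x80000B0E
 *
 * The error code and fault address are handed to hmR0VmxSetPendingEvent() separately
 * so they can be applied (error-code VMCS field, CR2) at injection time.
 */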
6760
6761
6762/**
6763 * Converts the pending HM event into a TRPM trap.
6764 *
6765 * @param pVCpu The cross context virtual CPU structure.
6766 */
6767static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6768{
6769 Assert(pVCpu->hm.s.Event.fPending);
6770
6771 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
6772 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
6773 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
6774 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6775
6776 /* If a trap was already pending, we did something wrong! */
6777 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6778
6779 TRPMEVENT enmTrapType;
6780 switch (uVectorType)
6781 {
6782 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6783 enmTrapType = TRPM_HARDWARE_INT;
6784 break;
6785
6786 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6787 enmTrapType = TRPM_SOFTWARE_INT;
6788 break;
6789
6790 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6791 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6792 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6793 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6794 enmTrapType = TRPM_TRAP;
6795 break;
6796
6797 default:
6798 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6799 enmTrapType = TRPM_32BIT_HACK;
6800 break;
6801 }
6802
6803 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6804
6805 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6806 AssertRC(rc);
6807
6808 if (fErrorCodeValid)
6809 TRPMSetErrorCode(pVCpu, uErrorCode);
6810
6811 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6812 && uVector == X86_XCPT_PF)
6813 {
6814 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6815 }
6816 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6817 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6818 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6819 {
6820 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6821 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6822 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6823 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6824 }
6825
6826 /* Clear any pending events from the VMCS. */
6827 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
6828 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0); AssertRC(rc);
6829
6830 /* We're now done converting the pending event. */
6831 pVCpu->hm.s.Event.fPending = false;
6832}
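
/*
 * This is the inverse of hmR0VmxTrpmTrapToPendingEvent(). For example, a pending event
 * of 0x80000B0E (hardware exception, vector 14, error code valid) is handed back as:
 *
 *     TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
 *     TRPMSetErrorCode(pVCpu, uErrorCode);
 *     TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
 *
 * before the pending-event flag is cleared.
 */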
6833
6834
6835/**
6836 * Does the necessary state syncing before returning to ring-3 for any reason
6837 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6838 *
6839 * @returns VBox status code.
6840 * @param pVCpu The cross context virtual CPU structure.
6841 * @param fImportState Whether to import the guest state from the VMCS back
6842 * to the guest-CPU context.
6843 *
6844 * @remarks No-long-jmp zone!!!
6845 */
6846static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState)
6847{
6848 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6849 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6850
6851 RTCPUID idCpu = RTMpCpuId();
6852 Log4Func(("HostCpuId=%u\n", idCpu));
6853
6854 /*
6855 * !!! IMPORTANT !!!
6856 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
6857 */
6858
6859 /* Save the guest state if necessary. */
6860 if (fImportState)
6861 {
6862 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
6863 AssertRCReturn(rc, rc);
6864 }
6865
6866 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
6867 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
6868 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6869
6870 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
6871#ifdef VBOX_STRICT
6872 if (CPUMIsHyperDebugStateActive(pVCpu))
6873 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6874#endif
6875 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
6876 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
6877 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
6878
6879#if HC_ARCH_BITS == 64
6880 /* Restore host-state bits that VT-x only restores partially. */
6881 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
6882 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
6883 {
6884 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
6885 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6886 }
6887 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6888#endif
6889
6890 /* Restore the lazy host MSRs as we're leaving VT-x context. */
6891 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
6892 {
6893 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
6894 if (!fImportState)
6895 {
6896 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
6897 AssertRCReturn(rc, rc);
6898 }
6899 hmR0VmxLazyRestoreHostMsrs(pVCpu);
6900 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
6901 }
6902 else
6903 pVCpu->hm.s.vmx.fLazyMsrs = 0;
6904
6905 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
6906 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
6907
6908 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
6909 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
6910 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
6911 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
6912 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
6913 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
6914 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
6915 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
6916 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6917
6918 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
6919
6920 /** @todo This partially defeats the purpose of having preemption hooks.
6921 * The problem is, deregistering the hooks should be moved to a place that
6922 * lasts until the EMT is about to be destroyed, not done every time we leave
6923 * HM context.
6924 */
6925 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
6926 {
6927 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6928 AssertRCReturn(rc, rc);
6929
6930 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
6931 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
6932 }
6933 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
6934 NOREF(idCpu);
6935
6936 return VINF_SUCCESS;
6937}
6938
6939
6940/**
6941 * Leaves the VT-x session.
6942 *
6943 * @returns VBox status code.
6944 * @param pVCpu The cross context virtual CPU structure.
6945 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6946 * out-of-sync. Make sure to update the required fields
6947 * before using them.
6948 *
6949 * @remarks No-long-jmp zone!!!
6950 */
6951static int hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6952{
6953 HM_DISABLE_PREEMPT();
6954 HMVMX_ASSERT_CPU_SAFE();
6955 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6956 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6957
6958 /* When thread-context hooks are used, we can avoid doing the leave again if we were preempted earlier
6959 and already did it from the VMXR0ThreadCtxCallback(). */
6960 if (!pVCpu->hm.s.fLeaveDone)
6961 {
6962 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
6963 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
6964 pVCpu->hm.s.fLeaveDone = true;
6965 }
6966 Assert(!pMixedCtx->fExtrn); NOREF(pMixedCtx);
6967
6968 /*
6969 * !!! IMPORTANT !!!
6970 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
6971 */
6972
6973 /* Deregister hook now that we've left HM context before re-enabling preemption. */
6974 /** @todo Deregistering here means we need to VMCLEAR always
6975 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
6976 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
6977 VMMR0ThreadCtxHookDisable(pVCpu);
6978
6979 /* Leave HM context. This takes care of local init (term). */
6980 int rc = HMR0LeaveCpu(pVCpu);
6981
6982 HM_RESTORE_PREEMPT();
6983 return rc;
6984}
6985
6986
6987/**
6988 * Does the necessary state syncing before doing a longjmp to ring-3.
6989 *
6990 * @returns VBox status code.
6991 * @param pVCpu The cross context virtual CPU structure.
6992 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6993 * out-of-sync. Make sure to update the required fields
6994 * before using them.
6995 *
6996 * @remarks No-long-jmp zone!!!
6997 */
6998DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6999{
7000 return hmR0VmxLeaveSession(pVCpu, pMixedCtx);
7001}
7002
7003
7004/**
7005 * Take necessary actions before going back to ring-3.
7006 *
7007 * An action requires us to go back to ring-3. This function does the necessary
7008 * steps before we can safely return to ring-3. This is not the same as longjmps
7009 * to ring-3; this is voluntary and prepares the guest so it may continue
7010 * executing outside HM (recompiler/IEM).
7011 *
7012 * @returns VBox status code.
7013 * @param pVCpu The cross context virtual CPU structure.
7014 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7015 * out-of-sync. Make sure to update the required fields
7016 * before using them.
7017 * @param rcExit The reason for exiting to ring-3. Can be
7018 * VINF_VMM_UNKNOWN_RING3_CALL.
7019 */
7020static int hmR0VmxExitToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit)
7021{
7022 Assert(pVCpu);
7023 Assert(pMixedCtx);
7024 HMVMX_ASSERT_PREEMPT_SAFE();
7025
7026 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7027 {
7028 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7029 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7030 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7031 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7032 }
7033
7034 /* Please, no longjumps here (any log flush must not jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7035 VMMRZCallRing3Disable(pVCpu);
7036 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
7037
7038 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7039 if (pVCpu->hm.s.Event.fPending)
7040 {
7041 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7042 Assert(!pVCpu->hm.s.Event.fPending);
7043 }
7044
7045 /* Clear interrupt-window and NMI-window controls as we re-evaluate it when we return from ring-3. */
7046 hmR0VmxClearIntNmiWindowsVmcs(pVCpu);
7047
7048 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending,
7049 and if we're injecting an event we should have a TRPM trap pending. */
7050 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7051#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
7052 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7053#endif
7054
7055 /* Save guest state and restore host state bits. */
7056 int rc = hmR0VmxLeaveSession(pVCpu, pMixedCtx);
7057 AssertRCReturn(rc, rc);
7058 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7059 /* Thread-context hooks are unregistered at this point!!! */
7060
7061 /* Sync recompiler state. */
7062 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7063 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7064 | CPUM_CHANGED_LDTR
7065 | CPUM_CHANGED_GDTR
7066 | CPUM_CHANGED_IDTR
7067 | CPUM_CHANGED_TR
7068 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7069 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
7070 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7071 {
7072 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7073 }
7074
7075 Assert(!pVCpu->hm.s.fClearTrapFlag);
7076
7077 /* Update the exit-to-ring-3 reason. */
7078 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
7079
7080 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7081 if (rcExit != VINF_EM_RAW_INTERRUPT)
7082 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7083
7084 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7085
7086 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7087 VMMRZCallRing3RemoveNotification(pVCpu);
7088 VMMRZCallRing3Enable(pVCpu);
7089
7090 return rc;
7091}
7092
7093
7094/**
7095 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7096 * longjump to ring-3 and possibly get preempted.
7097 *
7098 * @returns VBox status code.
7099 * @param pVCpu The cross context virtual CPU structure.
7100 * @param enmOperation The operation causing the ring-3 longjump.
7101 * @param pvUser Opaque pointer to the guest-CPU context. The data
7102 * may be out-of-sync. Make sure to update the required
7103 * fields before using them.
7104 */
7105static DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7106{
7107 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7108 {
7109 /*
7110 * !!! IMPORTANT !!!
7111 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
7112 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
7113 */
7114 VMMRZCallRing3RemoveNotification(pVCpu);
7115 VMMRZCallRing3Disable(pVCpu);
7116 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
7117 RTThreadPreemptDisable(&PreemptState);
7118
7119 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
7120 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
7121 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7122
7123#if HC_ARCH_BITS == 64
7124 /* Restore host-state bits that VT-x only restores partially. */
7125 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7126 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7127 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7128 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7129#endif
7130
7131 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7132 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
7133 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7134
7135 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
7136 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7137 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7138 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7139 {
7140 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7141 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7142 }
7143
7144 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7145 VMMR0ThreadCtxHookDisable(pVCpu);
7146 HMR0LeaveCpu(pVCpu);
7147 RTThreadPreemptRestore(&PreemptState);
7148 return VINF_SUCCESS;
7149 }
7150
7151 Assert(pVCpu);
7152 Assert(pvUser);
7153 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7154 HMVMX_ASSERT_PREEMPT_SAFE();
7155
7156 VMMRZCallRing3Disable(pVCpu);
7157 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7158
7159 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
7160
7161 int rc = hmR0VmxLongJmpToRing3(pVCpu, (PCPUMCTX)pvUser);
7162 AssertRCReturn(rc, rc);
7163
7164 VMMRZCallRing3Enable(pVCpu);
7165 return VINF_SUCCESS;
7166}
7167
7168
7169/**
7170 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7171 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7172 *
7173 * @param pVCpu The cross context virtual CPU structure.
7174 */
7175DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7176{
7177 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7178 {
7179 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7180 {
7181 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7182 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7183 AssertRC(rc);
7184 Log4Func(("Setup interrupt-window exiting\n"));
7185 }
7186 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7187}
7188
7189
7190/**
7191 * Clears the interrupt-window exiting control in the VMCS.
7192 *
7193 * @param pVCpu The cross context virtual CPU structure.
7194 */
7195DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7196{
7197 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7198 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7199 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7200 AssertRC(rc);
7201 Log4Func(("Cleared interrupt-window exiting\n"));
7202}
7203
7204
7205/**
7206 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7207 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7208 *
7209 * @param pVCpu The cross context virtual CPU structure.
7210 */
7211DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7212{
7213 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7214 {
7215 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7216 {
7217 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7218 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7219 AssertRC(rc);
7220 Log4Func(("Setup NMI-window exiting\n"));
7221 }
7222 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7223}
7224
7225
7226/**
7227 * Clears the NMI-window exiting control in the VMCS.
7228 *
7229 * @param pVCpu The cross context virtual CPU structure.
7230 */
7231DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7232{
7233 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7234 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7235 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7236 AssertRC(rc);
7237 Log4Func(("Cleared NMI-window exiting\n"));
7238}
7239
7240
7241/**
7242 * Evaluates the event to be delivered to the guest and sets it as the pending
7243 * event.
7244 *
7245 * @returns The VT-x guest-interruptibility state.
7246 * @param pVCpu The cross context virtual CPU structure.
7247 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7248 * out-of-sync. Make sure to update the required fields
7249 * before using them.
7250 */
7251static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7252{
7253 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7254 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7255 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7256 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7257 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7258
7259 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
7260 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7261 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7262 Assert(!TRPMHasTrap(pVCpu));
7263
7264 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
7265 APICUpdatePendingInterrupts(pVCpu);
7266
7267 /*
7268 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7269 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7270 */
7271 /** @todo SMI. SMIs take priority over NMIs. */
7272 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7273 {
7274 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7275 if ( !pVCpu->hm.s.Event.fPending
7276 && !fBlockNmi
7277 && !fBlockSti
7278 && !fBlockMovSS)
7279 {
7280 Log4Func(("Pending NMI\n"));
7281 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7282 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7283
7284 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7285 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7286 }
7287 else
7288 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7289 }
7290 /*
7291 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
7292     * a valid interrupt we must deliver the interrupt. We can no longer re-request it from the APIC.
7293 */
7294 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7295 && !pVCpu->hm.s.fSingleInstruction)
7296 {
7297 Assert(!DBGFIsStepping(pVCpu));
7298 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
7299 AssertRCReturn(rc, 0);
7300 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7301 if ( !pVCpu->hm.s.Event.fPending
7302 && !fBlockInt
7303 && !fBlockSti
7304 && !fBlockMovSS)
7305 {
7306 uint8_t u8Interrupt;
7307 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7308 if (RT_SUCCESS(rc))
7309 {
7310 Log4Func(("Pending external interrupt u8Interrupt=%#x\n", u8Interrupt));
7311 uint32_t u32IntInfo = u8Interrupt
7312 | VMX_EXIT_INTERRUPTION_INFO_VALID
7313 | (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7314
7315                     hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7316 }
7317 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
7318 {
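                /* The interrupt is masked by the TPR: program the TPR threshold with its priority
                   class (vector >> 4) so that we get a VM-exit once the guest lowers the TPR far
                   enough for the interrupt to become deliverable. */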
7319 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7320 hmR0VmxApicSetTprThreshold(pVCpu, u8Interrupt >> 4);
7321 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
7322
7323 /*
7324 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
7325 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
7326 * need to re-set this force-flag here.
7327 */
7328 }
7329 else
7330 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7331 }
7332 else
7333 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7334 }
7335
7336 return fIntrState;
7337}
7338
7339
7340/**
7341 * Sets a pending-debug exception (single-step/BS) in the VMCS to be delivered to
7342 * the guest if the guest is single-stepping.
7343 *
7344 * @param pVCpu The cross context virtual CPU structure.
7345 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7346 * out-of-sync. Make sure to update the required fields
7347 * before using them.
7348 */
7349DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7350{
7351 RT_NOREF(pVCpu);
7352 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); NOREF(pMixedCtx);
7353 return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7354}
7355
7356
7357/**
7358 * Injects any pending events into the guest if the guest is in a state to
7359 * receive them.
7360 *
7361 * @returns Strict VBox status code (i.e. informational status codes too).
7362 * @param pVCpu The cross context virtual CPU structure.
7363 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7364 * out-of-sync. Make sure to update the required fields
7365 * before using them.
7366 * @param fIntrState The VT-x guest-interruptibility state.
7367 * @param   fStepping       Whether we're running in hmR0VmxRunGuestCodeStep() and should
7368 * return VINF_EM_DBG_STEPPED if the event was
7369 * dispatched directly.
7370 */
7371static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t fIntrState, bool fStepping)
7372{
7373 HMVMX_ASSERT_PREEMPT_SAFE();
7374 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7375
7376 bool fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7377 bool fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7378
7379 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
7380 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7381 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7382 Assert(!TRPMHasTrap(pVCpu));
7383
7384 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
7385 if (pVCpu->hm.s.Event.fPending)
7386 {
7387 /*
7388 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
7389 * pending even while injecting an event and in this case, we want a VM-exit as soon as
7390 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
7391 *
7392 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
7393 */
7394 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7395#ifdef VBOX_STRICT
7396 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7397 {
7398 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7399 Assert(!fBlockInt);
7400 Assert(!fBlockSti);
7401 Assert(!fBlockMovSS);
7402 }
7403 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7404 {
7405 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7406 Assert(!fBlockSti);
7407 Assert(!fBlockMovSS);
7408 Assert(!fBlockNmi);
7409 }
7410#endif
7411 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7412 uIntType));
7413 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7414 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
7415 &fIntrState);
7416 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
7417
7418 /* Update the interruptibility-state as it could have been changed by
7419 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7420 fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7421 fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7422
7423 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7424 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7425 else
7426 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7427 }
7428
7429 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7430 if ( fBlockSti
7431 || fBlockMovSS)
7432 {
7433 if (!pVCpu->hm.s.fSingleInstruction)
7434 {
7435 /*
7436 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7437 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7438 * See Intel spec. 27.3.4 "Saving Non-Register State".
7439 */
7440 Assert(!DBGFIsStepping(pVCpu));
7441 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
7442 AssertRCReturn(rc, rc);
7443 if (pMixedCtx->eflags.Bits.u1TF)
7444 {
7445 int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
7446 AssertRCReturn(rc2, rc2);
7447 }
7448 }
7449 else if (pMixedCtx->eflags.Bits.u1TF)
7450 {
7451 /*
7452 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7453 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7454 */
7455 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7456 fIntrState = 0;
7457 }
7458 }
7459
7460 /*
7461 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7462 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7463 */
7464 int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState);
7465 AssertRCReturn(rc3, rc3);
7466
7467 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
7468 NOREF(fBlockMovSS); NOREF(fBlockSti);
7469 return rcStrict;
7470}
7471
7472
7473/**
7474 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
7475 *
7476 * @param pVCpu The cross context virtual CPU structure.
7477 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7478 * out-of-sync. Make sure to update the required fields
7479 * before using them.
7480 */
7481DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7482{
7483 NOREF(pMixedCtx);
7484 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7485 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7486}
7487
7488
7489/**
7490 * Injects a double-fault (\#DF) exception into the VM.
7491 *
7492 * @returns Strict VBox status code (i.e. informational status codes too).
7493 * @param pVCpu The cross context virtual CPU structure.
7494 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7495 * out-of-sync. Make sure to update the required fields
7496 * before using them.
7497 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7498 * and should return VINF_EM_DBG_STEPPED if the event
7499 * is injected directly (register modified by us, not
7500 * by hardware on VM-entry).
7501 * @param pfIntrState Pointer to the current guest interruptibility-state.
7502 * This interruptibility-state will be updated if
7503 *                          necessary. This cannot be NULL.
7504 */
7505DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fStepping, uint32_t *pfIntrState)
7506{
7507 NOREF(pMixedCtx);
7508 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7509 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7510 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7511 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping,
7512 pfIntrState);
7513}
7514
7515
7516/**
7517 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
7518 *
7519 * @param pVCpu The cross context virtual CPU structure.
7520 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7521 * out-of-sync. Make sure to update the required fields
7522 * before using them.
7523 */
7524DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7525{
7526 NOREF(pMixedCtx);
7527 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7528 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7529 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7530}
7531
7532
7533/**
7534 * Sets an overflow (\#OF) exception as pending-for-injection into the VM.
7535 *
7536 * @param pVCpu The cross context virtual CPU structure.
7537 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7538 * out-of-sync. Make sure to update the required fields
7539 * before using them.
7540 * @param cbInstr The value of RIP that is to be pushed on the guest
7541 * stack.
7542 */
7543DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7544{
7545 NOREF(pMixedCtx);
7546 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7547 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7548 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7549}
7550
7551
7552/**
7553 * Injects a general-protection (\#GP) fault into the VM.
7554 *
7555 * @returns Strict VBox status code (i.e. informational status codes too).
7556 * @param pVCpu The cross context virtual CPU structure.
7557 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7558 * out-of-sync. Make sure to update the required fields
7559 * before using them.
7560 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7561 * mode, i.e. in real-mode it's not valid).
7562 * @param u32ErrorCode The error code associated with the \#GP.
7563 * @param fStepping Whether we're running in
7564 * hmR0VmxRunGuestCodeStep() and should return
7565 * VINF_EM_DBG_STEPPED if the event is injected
7566 * directly (register modified by us, not by
7567 * hardware on VM-entry).
7568 * @param pfIntrState Pointer to the current guest interruptibility-state.
7569 * This interruptibility-state will be updated if
7570 *                          necessary. This cannot be NULL.
7571 */
7572DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7573 bool fStepping, uint32_t *pfIntrState)
7574{
7575 NOREF(pMixedCtx);
7576 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7577 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7578 if (fErrorCodeValid)
7579 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7580 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping,
7581 pfIntrState);
7582}
7583
7584
7585#if 0 /* unused */
7586/**
7587 * Sets a general-protection (\#GP) exception as pending-for-injection into the
7588 * VM.
7589 *
7590 * @param pVCpu The cross context virtual CPU structure.
7591 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7592 * out-of-sync. Make sure to update the required fields
7593 * before using them.
7594 * @param u32ErrorCode The error code associated with the \#GP.
7595 */
7596DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7597{
7598 NOREF(pMixedCtx);
7599 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7600 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7601 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7602 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7603}
7604#endif /* unused */
7605
7606
7607/**
7608 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7609 *
7610 * @param pVCpu The cross context virtual CPU structure.
7611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7612 * out-of-sync. Make sure to update the required fields
7613 * before using them.
7614 * @param uVector The software interrupt vector number.
7615 * @param cbInstr The value of RIP that is to be pushed on the guest
7616 * stack.
7617 */
7618DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7619{
7620 NOREF(pMixedCtx);
7621 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
7622 if ( uVector == X86_XCPT_BP
7623 || uVector == X86_XCPT_OF)
7624 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7625 else
7626 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7627 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7628}
7629
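/*
 * Note: the hmR0VmxSetPendingXxx helpers above only record the event in pVCpu->hm.s.Event;
 * nothing is written to the VMCS at this point. A purely illustrative call from an exit
 * handler that has decoded an INT 21h instruction might look like this (all identifiers
 * other than the helper itself are assumed to be in scope in such a handler):
 *
 *     hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, 0x21, pVmxTransient->cbInstr);
 *
 * The recorded event is injected into the VMCS by hmR0VmxInjectPendingEvent() on the way
 * back into the guest.
 */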
7630
7631/**
7632 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7633 * stack.
7634 *
7635 * @returns Strict VBox status code (i.e. informational status codes too).
7636 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7637 * @param pVM The cross context VM structure.
7638 * @param pMixedCtx Pointer to the guest-CPU context.
7639 * @param uValue The value to push to the guest stack.
7640 */
7641DECLINLINE(VBOXSTRICTRC) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7642{
7643 /*
7644 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7645 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7646 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7647 */
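    /* SP == 1 means a 16-bit push would wrap around the segment (see the wraparound note
       above); treat it as fatal, i.e. return VINF_EM_RESET (triple fault). */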
7648 if (pMixedCtx->sp == 1)
7649 return VINF_EM_RESET;
7650 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7651 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7652 AssertRC(rc);
7653 return rc;
7654}
7655
7656
7657/**
7658 * Injects an event into the guest upon VM-entry by updating the relevant fields
7659 * in the VM-entry area in the VMCS.
7660 *
7661 * @returns Strict VBox status code (i.e. informational status codes too).
7662 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7663 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7664 *
7665 * @param pVCpu The cross context virtual CPU structure.
7666 * @param u64IntInfo The VM-entry interruption-information field.
7667 * @param cbInstr The VM-entry instruction length in bytes (for
7668 * software interrupts, exceptions and privileged
7669 * software exceptions).
7670 * @param u32ErrCode The VM-entry exception error code.
7671 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions.
7672 * @param pfIntrState Pointer to the current guest interruptibility-state.
7673 * This interruptibility-state will be updated if
7674 *                          necessary. This cannot be NULL.
7675 * @param fStepping Whether we're running in
7676 * hmR0VmxRunGuestCodeStep() and should return
7677 * VINF_EM_DBG_STEPPED if the event is injected
7678 * directly (register modified by us, not by
7679 * hardware on VM-entry).
7680 */
7681static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
7682 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState)
7683{
7684 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7685 AssertMsg(!RT_HI_U32(u64IntInfo), ("%#RX64\n", u64IntInfo));
7686 Assert(pfIntrState);
7687
7688 PCPUMCTX pMixedCtx = &pVCpu->cpum.GstCtx;
7689 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7690 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7691 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7692
7693#ifdef VBOX_STRICT
7694 /*
7695 * Validate the error-code-valid bit for hardware exceptions.
7696 * No error codes for exceptions in real-mode.
7697 *
7698 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
7699 */
7700 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7701 && !CPUMIsGuestInRealModeEx(pMixedCtx))
7702 {
7703 switch (uVector)
7704 {
7705 case X86_XCPT_PF:
7706 case X86_XCPT_DF:
7707 case X86_XCPT_TS:
7708 case X86_XCPT_NP:
7709 case X86_XCPT_SS:
7710 case X86_XCPT_GP:
7711 case X86_XCPT_AC:
7712 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7713 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7714 RT_FALL_THRU();
7715 default:
7716 break;
7717 }
7718 }
7719#endif
7720
7721 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7722 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7723 || !(*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7724
7725 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7726
7727 /*
7728 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
7729 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
7730 * interrupt handler in the (real-mode) guest.
7731 *
7732 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
7733 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7734 */
7735 if (CPUMIsGuestInRealModeEx(pMixedCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
7736 {
7737 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
7738 {
7739 /*
7740 * For unrestricted execution enabled CPUs running real-mode guests, we must not
7741 * set the deliver-error-code bit.
7742 *
7743 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7744 */
7745 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7746 }
7747 else
7748 {
7749 PVM pVM = pVCpu->CTX_SUFF(pVM);
7750 Assert(PDMVmmDevHeapIsEnabled(pVM));
7751 Assert(pVM->hm.s.vmx.pRealModeTSS);
7752
7753             /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
7754 int rc2 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK | CPUMCTX_EXTRN_RIP
7755 | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
7756 AssertRCReturn(rc2, rc2);
7757
7758 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7759 size_t const cbIdtEntry = sizeof(X86IDTR16);
7760 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7761 {
7762 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7763 if (uVector == X86_XCPT_DF)
7764 return VINF_EM_RESET;
7765
7766 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7767 if (uVector == X86_XCPT_GP)
7768 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState);
7769
7770 /*
7771 * If we're injecting an event with no valid IDT entry, inject a #GP.
7772 * No error codes for exceptions in real-mode.
7773 *
7774 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
7775 */
7776 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping,
7777 pfIntrState);
7778 }
7779
7780 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7781 uint16_t uGuestIp = pMixedCtx->ip;
7782 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
7783 {
7784 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7785                 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
7786 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7787 }
7788 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
7789 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7790
7791 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7792 X86IDTR16 IdtEntry;
7793 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
7794 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7795 AssertRCReturn(rc2, rc2);
7796
7797 /* Construct the stack frame for the interrupt/exception handler. */
7798 VBOXSTRICTRC rcStrict;
7799 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
7800 if (rcStrict == VINF_SUCCESS)
7801 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
7802 if (rcStrict == VINF_SUCCESS)
7803 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
7804
7805 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7806 if (rcStrict == VINF_SUCCESS)
7807 {
7808 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7809 pMixedCtx->rip = IdtEntry.offSel;
7810 pMixedCtx->cs.Sel = IdtEntry.uSel;
7811 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
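                /* Real-mode code-segment base = selector * 16; cbIdtEntry (sizeof(X86IDTR16) == 4)
                   happens to be exactly the shift count needed for that. */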
7812 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
7813 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7814 && uVector == X86_XCPT_PF)
7815 pMixedCtx->cr2 = GCPtrFaultAddress;
7816
7817 /* If any other guest-state bits are changed here, make sure to update
7818 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7819 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
7820 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
7821 | HM_CHANGED_GUEST_RSP);
7822
7823 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7824 if (*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7825 {
7826 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7827 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7828 Log4Func(("Clearing inhibition due to STI\n"));
7829 *pfIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
7830 }
7831 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
7832 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
7833
7834             /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt
7835                to 'undo' it if we return to ring-3 before executing guest code. */
7836 pVCpu->hm.s.Event.fPending = false;
7837
7838 /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */
7839 if (fStepping)
7840 rcStrict = VINF_EM_DBG_STEPPED;
7841 }
7842 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
7843 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7844 return rcStrict;
7845 }
7846 }
7847
7848 /* Validate. */
7849 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
7850 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
7851 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
7852
7853 /* Inject. */
7854 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
7855 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
7856 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
7857 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
7858 AssertRCReturn(rc, rc);
7859
7860 /* Update CR2. */
7861 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7862 && uVector == X86_XCPT_PF)
7863 pMixedCtx->cr2 = GCPtrFaultAddress;
7864
7865 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
7866
7867 return VINF_SUCCESS;
7868}
7869
7870
7871/**
7872 * Clears the interrupt-window and NMI-window exiting controls in the VMCS if
7873 * they are set.
7874 *
7875 * @param   pVCpu       The cross context virtual CPU structure.
7876 *
7877 * @remarks No-long-jump zone!!!
7881 */
7882static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
7883{
7884 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7885 {
7886 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7887         Log4Func(("Cleared interrupt window\n"));
7888 }
7889
7890 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
7891 {
7892 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
7893         Log4Func(("Cleared NMI window\n"));
7894 }
7895}
7896
7897
7898/**
7899 * Enters the VT-x session.
7900 *
7901 * @returns VBox status code.
7902 * @param pVCpu The cross context virtual CPU structure.
7903 * @param pHostCpu Pointer to the global CPU info struct.
7904 */
7905VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
7906{
7907 AssertPtr(pVCpu);
7908 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
7909 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7910 RT_NOREF(pHostCpu);
7911
7912 LogFlowFunc(("pVCpu=%p\n", pVCpu));
7913 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
7914 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
7915
7916#ifdef VBOX_STRICT
7917 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
7918 RTCCUINTREG uHostCR4 = ASMGetCR4();
7919 if (!(uHostCR4 & X86_CR4_VMXE))
7920 {
7921 LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
7922 return VERR_VMX_X86_CR4_VMXE_CLEARED;
7923 }
7924#endif
7925
7926 /*
7927 * Load the VCPU's VMCS as the current (and active) one.
7928 */
7929 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
7930 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7931 if (RT_FAILURE(rc))
7932 return rc;
7933
7934 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
7935 pVCpu->hm.s.fLeaveDone = false;
7936 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
7937
7938 return VINF_SUCCESS;
7939}
7940
7941
7942/**
7943 * The thread-context callback (only on platforms which support it).
7944 *
7945 * @param enmEvent The thread-context event.
7946 * @param pVCpu The cross context virtual CPU structure.
7947 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
7948 * @thread EMT(pVCpu)
7949 */
7950VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
7951{
7952 NOREF(fGlobalInit);
7953
7954 switch (enmEvent)
7955 {
7956 case RTTHREADCTXEVENT_OUT:
7957 {
7958 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7959 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
7960 VMCPU_ASSERT_EMT(pVCpu);
7961
7962 /* No longjmps (logger flushes, locks) in this fragile context. */
7963 VMMRZCallRing3Disable(pVCpu);
7964 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
7965
7966 /*
7967 * Restore host-state (FPU, debug etc.)
7968 */
7969 if (!pVCpu->hm.s.fLeaveDone)
7970 {
7971 /*
7972 * Do -not- import the guest-state here as we might already be in the middle of importing
7973 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState().
7974 */
7975 hmR0VmxLeave(pVCpu, false /* fImportState */);
7976 pVCpu->hm.s.fLeaveDone = true;
7977 }
7978
7979 /* Leave HM context, takes care of local init (term). */
7980 int rc = HMR0LeaveCpu(pVCpu);
7981 AssertRC(rc); NOREF(rc);
7982
7983 /* Restore longjmp state. */
7984 VMMRZCallRing3Enable(pVCpu);
7985 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
7986 break;
7987 }
7988
7989 case RTTHREADCTXEVENT_IN:
7990 {
7991 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7992 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
7993 VMCPU_ASSERT_EMT(pVCpu);
7994
7995 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
7996 VMMRZCallRing3Disable(pVCpu);
7997 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
7998
7999 /* Initialize the bare minimum state required for HM. This takes care of
8000 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8001 int rc = hmR0EnterCpu(pVCpu);
8002 AssertRC(rc);
8003 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8004 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
8005
8006 /* Load the active VMCS as the current one. */
8007 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8008 {
8009 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8010 AssertRC(rc); NOREF(rc);
8011 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8012 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8013 }
8014 pVCpu->hm.s.fLeaveDone = false;
8015
8016 /* Restore longjmp state. */
8017 VMMRZCallRing3Enable(pVCpu);
8018 break;
8019 }
8020
8021 default:
8022 break;
8023 }
8024}
8025
8026
8027/**
8028 * Exports the host state into the VMCS host-state area.
8029 * Sets up the VM-exit MSR-load area.
8030 *
8031 * The CPU state will be loaded from these fields on every successful VM-exit.
8032 *
8033 * @returns VBox status code.
8034 * @param pVCpu The cross context virtual CPU structure.
8035 *
8036 * @remarks No-long-jump zone!!!
8037 */
8038static int hmR0VmxExportHostState(PVMCPU pVCpu)
8039{
8040 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8041
8042 int rc = VINF_SUCCESS;
8043 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
8044 {
8045 rc = hmR0VmxExportHostControlRegs();
8046 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8047
8048 rc = hmR0VmxExportHostSegmentRegs(pVCpu);
8049 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8050
8051 rc = hmR0VmxExportHostMsrs(pVCpu);
8052 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8053
8054 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
8055 }
8056 return rc;
8057}
8058
8059
8060/**
8061 * Saves the host state in the VMCS host-state area.
8062 *
8063 * @returns VBox status code.
8064 * @param pVCpu The cross context virtual CPU structure.
8065 *
8066 * @remarks No-long-jump zone!!!
8067 */
8068VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu)
8069{
8070 AssertPtr(pVCpu);
8071 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8072
8073 /*
8074 * Export the host state here while entering HM context.
8075 * When thread-context hooks are used, we might get preempted and have to re-save the host
8076      * state, but most of the time we won't be preempted, so do it here before we disable interrupts.
8077 */
8078 return hmR0VmxExportHostState(pVCpu);
8079}
8080
8081
8082/**
8083 * Exports the guest state into the VMCS guest-state area.
8084 *
8085 * This will typically be done before VM-entry when the guest-CPU state and the
8086 * VMCS state may potentially be out of sync.
8087 *
8088 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
8089 * VM-entry controls.
8090 * Sets up the appropriate VMX non-root function to execute guest code based on
8091 * the guest CPU mode.
8092 *
8093 * @returns VBox strict status code.
8094 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
8095 * without unrestricted guest access and the VMMDev is not presently
8096 * mapped (e.g. EFI32).
8097 *
8098 * @param pVCpu The cross context virtual CPU structure.
8099 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8100 * out-of-sync. Make sure to update the required fields
8101 * before using them.
8102 *
8103 * @remarks No-long-jump zone!!!
8104 */
8105static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
8106{
8107 AssertPtr(pVCpu);
8108 AssertPtr(pMixedCtx);
8109 HMVMX_ASSERT_PREEMPT_SAFE();
8110
8111 LogFlowFunc(("pVCpu=%p\n", pVCpu));
8112
8113 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
8114
8115 /* Determine real-on-v86 mode. */
8116 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8117 if ( !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
8118 && CPUMIsGuestInRealModeEx(pMixedCtx))
8119 {
8120 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8121 }
8122
8123 /*
8124 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8125 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8126 */
8127 int rc = hmR0VmxSelectVMRunHandler(pVCpu, pMixedCtx);
8128 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8129
8130 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8131 rc = hmR0VmxExportGuestEntryCtls(pVCpu, pMixedCtx);
8132 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8133
8134 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8135 rc = hmR0VmxExportGuestExitCtls(pVCpu, pMixedCtx);
8136 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8137
8138 rc = hmR0VmxExportGuestCR0(pVCpu, pMixedCtx);
8139 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8140
8141 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pMixedCtx);
8142 if (rcStrict == VINF_SUCCESS)
8143 { /* likely */ }
8144 else
8145 {
8146 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
8147 return rcStrict;
8148 }
8149
8150 rc = hmR0VmxExportGuestSegmentRegs(pVCpu, pMixedCtx);
8151 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8152
8153 /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it
8154 may alter controls if we determine we don't have to swap EFER after all. */
8155 rc = hmR0VmxExportGuestMsrs(pVCpu, pMixedCtx);
8156 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8157
8158 rc = hmR0VmxExportGuestApicTpr(pVCpu);
8159 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8160
8161 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu);
8162 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8163
8164 /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is
8165 not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */
8166 rc = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
8167 rc |= hmR0VmxExportGuestRsp(pVCpu, pMixedCtx);
8168 rc |= hmR0VmxExportGuestRflags(pVCpu, pMixedCtx);
8169 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8170
8171     /* Clear any context-changed bits that are exported unconditionally as well as unused/reserved bits. */
8172 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
8173 | HM_CHANGED_GUEST_CR2
8174 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
8175 | HM_CHANGED_GUEST_X87
8176 | HM_CHANGED_GUEST_SSE_AVX
8177 | HM_CHANGED_GUEST_OTHER_XSAVE
8178 | HM_CHANGED_GUEST_XCRx
8179 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
8180 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
8181 | HM_CHANGED_GUEST_TSC_AUX
8182 | HM_CHANGED_GUEST_OTHER_MSRS
8183 | HM_CHANGED_GUEST_HWVIRT
8184 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
8185
8186 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
8187 return rc;
8188}
8189
8190
8191/**
8192 * Exports the state shared between the host and guest into the VMCS.
8193 *
8194 * @param pVCpu The cross context virtual CPU structure.
8195 * @param pCtx Pointer to the guest-CPU context.
8196 *
8197 * @remarks No-long-jump zone!!!
8198 */
8199static void hmR0VmxExportSharedState(PVMCPU pVCpu, PCPUMCTX pCtx)
8200{
8201 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8202 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8203
8204 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
8205 {
8206 int rc = hmR0VmxExportSharedDebugState(pVCpu, pCtx);
8207 AssertRC(rc);
8208 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
8209
8210         /* Loading the shared debug bits might have changed the eflags.TF bit for debugging purposes. */
8211 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
8212 {
8213 rc = hmR0VmxExportGuestRflags(pVCpu, pCtx);
8214 AssertRC(rc);
8215 }
8216 }
8217
8218 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
8219 {
8220 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8221 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
8222 }
8223
8224 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
8225 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
8226}
8227
8228
8229/**
8230 * Worker for exporting the guest-state bits in the inner VT-x execution loop.
8231 *
8232 * @returns Strict VBox status code (i.e. informational status codes too).
8233 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
8234 * without unrestricted guest access and the VMMDev is not presently
8235 * mapped (e.g. EFI32).
8236 *
8237 * @param pVCpu The cross context virtual CPU structure.
8238 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8239 * out-of-sync. Make sure to update the required fields
8240 * before using them.
8241 *
8242 * @remarks No-long-jump zone!!!
8243 */
8244static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
8245{
8246 HMVMX_ASSERT_PREEMPT_SAFE();
8247 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8248 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8249
8250#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8251 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8252#endif
8253
8254 /*
8255      * For many VM-exits only RIP changes, so try to export it first without going
8256      * through a lot of change-flag checks.
8257 */
8258 VBOXSTRICTRC rcStrict;
8259 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
8260 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
8261 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
8262 {
8263 rcStrict = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
8264 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8265 { /* likely */}
8266 else
8267 AssertMsgFailedReturn(("hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
8268 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
8269 }
8270 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8271 {
8272 rcStrict = hmR0VmxExportGuestState(pVCpu, pMixedCtx);
8273 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8274 { /* likely */}
8275 else
8276 {
8277 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("hmR0VmxExportGuestState failed! rc=%Rrc\n",
8278 VBOXSTRICTRC_VAL(rcStrict)));
8279 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8280 return rcStrict;
8281 }
8282 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
8283 }
8284 else
8285 rcStrict = VINF_SUCCESS;
8286
8287#ifdef VBOX_STRICT
8288 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8289 fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
8290 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
8291 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
8292 ("fCtxChanged=%#RX64\n", fCtxChanged));
8293#endif
8294 return rcStrict;
8295}
8296
8297
8298/**
8299 * Does the preparations before executing guest code in VT-x.
8300 *
8301 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8302 * recompiler/IEM. We must be cautious about committing guest-state information
8303 * into the VMCS here on the assumption that we will assuredly execute the
8304 * guest in VT-x mode.
8305 *
8306 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8307 * the common-state (TRPM/forceflags), we must undo those changes so that the
8308 * recompiler/IEM can (and should) use them when it resumes guest execution.
8309 * Otherwise such operations must be done when we can no longer exit to ring-3.
8310 *
8311 * @returns Strict VBox status code (i.e. informational status codes too).
8312 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8313 * have been disabled.
8314 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8315 * double-fault into the guest.
8316 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8317 * dispatched directly.
8318 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8319 *
8320 * @param pVCpu The cross context virtual CPU structure.
8321 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8322 * out-of-sync. Make sure to update the required fields
8323 * before using them.
8324 * @param pVmxTransient Pointer to the VMX transient structure.
8325 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8326 * us ignore some of the reasons for returning to
8327 * ring-3, and return VINF_EM_DBG_STEPPED if event
8328 * dispatching took place.
8329 */
8330static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8331{
8332 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8333
8334#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8335 PGMRZDynMapFlushAutoSet(pVCpu);
8336#endif
8337
8338 /* Check force flag actions that might require us to go back to ring-3. */
8339 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pMixedCtx, fStepping);
8340 if (rcStrict == VINF_SUCCESS)
8341     { /* FFs don't get set all the time. */ }
8342 else
8343 return rcStrict;
8344
8345 /*
8346      * Set up the virtualized-APIC accesses.
8347      *
8348      * Note! This can cause a longjmp to R3 due to the acquisition of the PGM lock
8349 * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
8350 *
8351 * This is the reason we do it here and not in hmR0VmxExportGuestState().
8352 */
8353 PVM pVM = pVCpu->CTX_SUFF(pVM);
8354 if ( !pVCpu->hm.s.vmx.u64MsrApicBase
8355 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
8356 && PDMHasApic(pVM))
8357 {
8358 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
8359 Assert(u64MsrApicBase);
8360 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8361
8362 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
8363
8364 /* Unalias any existing mapping. */
8365 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8366 AssertRCReturn(rc, rc);
8367
8368 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
8369 Log4Func(("Mapped HC APIC-access page at %#RGp\n", GCPhysApicBase));
8370 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8371 AssertRCReturn(rc, rc);
8372
8373 /* Update the per-VCPU cache of the APIC base MSR. */
8374 pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
8375 }
8376
8377 if (TRPMHasTrap(pVCpu))
8378 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8379 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8380
8381 /*
8382 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
8383 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
8384 * also result in triple-faulting the VM.
8385 */
8386 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping);
8387 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8388 { /* likely */ }
8389 else
8390 {
8391 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8392 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8393 return rcStrict;
8394 }
8395
8396 /*
8397 * A longjump might result in importing CR3 even for VM-exits that don't necessarily
8398      * import CR3 themselves. We need to handle them here since even the above
8399      * hmR0VmxInjectPendingEvent() call may lazily import guest-CPU state on demand,
8400      * causing the force flags below to be set.
8401 */
8402 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
8403 {
8404 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
8405 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
8406 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
8407 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
8408 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
8409 }
8410 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
8411 {
8412 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
8413 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
8414 }
8415
8416 /*
8417 * No longjmps to ring-3 from this point on!!!
8418 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8419 * This also disables flushing of the R0-logger instance (if any).
8420 */
8421 VMMRZCallRing3Disable(pVCpu);
8422
8423 /*
8424 * Export the guest state bits.
8425 *
8426 * We cannot perform longjmps while loading the guest state because we do not preserve the
8427 * host/guest state (although the VMCS will be preserved) across longjmps which can cause
8428 * CPU migration.
8429 *
8430 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8431 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8432 * Hence, loading of the guest state needs to be done -after- injection of events.
8433 */
8434 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pMixedCtx);
8435 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8436 { /* likely */ }
8437 else
8438 {
8439 VMMRZCallRing3Enable(pVCpu);
8440 return rcStrict;
8441 }
8442
8443 /*
8444 * We disable interrupts so that we don't miss any interrupts that would flag preemption
8445 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
8446      * preemption disabled for a while. Since this is purely to aid the
8447      * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily re-enable
8448      * and disable interrupts on NT.
8449 *
8450      * We need to check for force-flags that could've possibly been altered since we last
8451 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
8452 * see @bugref{6398}).
8453 *
8454 * We also check a couple of other force-flags as a last opportunity to get the EMT back
8455 * to ring-3 before executing guest code.
8456 */
8457 pVmxTransient->fEFlags = ASMIntDisableFlags();
8458
8459 if ( ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8460 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8461 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
8462 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8463 {
8464 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
8465 {
8466 pVCpu->hm.s.Event.fPending = false;
8467
8468 /*
8469 * We've injected any pending events. This is really the point of no return (to ring-3).
8470 *
8471 * Note! The caller expects to continue with interrupts & longjmps disabled on successful
8472 * returns from this function, so don't enable them here.
8473 */
8474 return VINF_SUCCESS;
8475 }
8476
8477 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8478 rcStrict = VINF_EM_RAW_INTERRUPT;
8479 }
8480 else
8481 {
8482 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8483 rcStrict = VINF_EM_RAW_TO_R3;
8484 }
8485
8486 ASMSetFlags(pVmxTransient->fEFlags);
8487 VMMRZCallRing3Enable(pVCpu);
8488
8489 return rcStrict;
8490}
8491
8492
8493/**
8494 * Prepares to run guest code in VT-x and we've committed to doing so. This
8495 * means there is no backing out to ring-3 or anywhere else at this
8496 * point.
8497 *
8498 * @param pVCpu The cross context virtual CPU structure.
8499 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8500 * out-of-sync. Make sure to update the required fields
8501 * before using them.
8502 * @param pVmxTransient Pointer to the VMX transient structure.
8503 *
8504 * @remarks Called with preemption disabled.
8505 * @remarks No-long-jump zone!!!
8506 */
8507static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8508{
8509 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8510 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8511 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8512
8513 /*
8514 * Indicate start of guest execution and where poking EMT out of guest-context is recognized.
8515 */
8516 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8517 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
8518
8519 PVM pVM = pVCpu->CTX_SUFF(pVM);
8520 if (!CPUMIsGuestFPUStateActive(pVCpu))
8521 {
8522 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
8523 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
8524 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
8525 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
8526 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
8527 }
8528
8529 /*
8530      * Lazily update the host MSR values in the auto-load/store MSR area.
8531 */
8532 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8533 && pVCpu->hm.s.vmx.cMsrs > 0)
8534 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8535
8536 /*
8537 * Re-save the host state bits as we may've been preempted (only happens when
8538      * thread-context hooks are used or when hmR0VmxSelectVMRunHandler() changes pfnStartVM).
8539 * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and
8540 * if we change the switcher back to 32-bit, we *must* save the 32-bit host state here.
8541 * See @bugref{8432}.
8542 */
8543 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
8544 {
8545 int rc = hmR0VmxExportHostState(pVCpu);
8546 AssertRC(rc);
8547 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState);
8548 }
8549 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
8550
8551 /*
8552 * Export the state shared between host and guest (FPU, debug, lazy MSRs).
8553 */
8554 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
8555 hmR0VmxExportSharedState(pVCpu, pMixedCtx);
8556 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
8557
8558 /* Store status of the shared guest-host state at the time of VM-entry. */
8559#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
8560 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8561 {
8562 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8563 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8564 }
8565 else
8566#endif
8567 {
8568 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8569 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8570 }
8571
8572 /*
8573 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8574 */
8575 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8576 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR];
8577
8578 PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
8579 RTCPUID idCurrentCpu = pCpu->idCpu;
8580 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8581 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8582 {
8583 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
8584 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8585 }
8586
8587 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
8588 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8589 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8590 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8591
8592 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8593
8594 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8595 to start executing. */
8596
8597 /*
8598 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8599 */
8600 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8601 {
8602 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8603 {
8604 bool fMsrUpdated;
8605 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
8606 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
8607 &fMsrUpdated);
8608 AssertRC(rc2);
8609 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8610 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8611 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8612 }
8613 else
8614 {
8615 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8616 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8617 }
8618 }
8619
8620 if (pVM->cpum.ro.GuestFeatures.fIbrs)
8621 {
8622 bool fMsrUpdated;
8623 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
8624 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */,
8625 &fMsrUpdated);
8626 AssertRC(rc2);
8627 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8628 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8629 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8630 }
8631
8632#ifdef VBOX_STRICT
8633 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8634 hmR0VmxCheckHostEferMsr(pVCpu);
8635 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8636#endif
8637#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8638 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
8639 {
8640 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
8641 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8642 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8643 }
8644#endif
8645}
8646
8647
8648/**
8649 * Performs some essential restoration of state after running guest code in
8650 * VT-x.
8651 *
8652 * @param pVCpu The cross context virtual CPU structure.
8653 * @param pVmxTransient Pointer to the VMX transient structure.
8654 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8655 *
8656 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8657 *
8658 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8659 * unconditionally when it is safe to do so.
8660 */
8661static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8662{
8663 uint64_t const uHostTsc = ASMReadTSC();
8664 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8665
8666 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
8667 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
8668 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
8669 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8670 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8671 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8672
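    /* If we're not intercepting RDTSC, hand TM the TSC value the guest could last have seen:
       the host TSC we just read plus the offset we applied to it for this run. */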
8673 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8674 TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.u64TscOffset);
8675
8676 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
8677 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8678 Assert(!ASMIntAreEnabled());
8679 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8680
8681#if HC_ARCH_BITS == 64
8682 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8683#endif
8684#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
8685 /* The 64-on-32 switcher maintains uVmcsState on its own and we need to leave it alone here. */
8686 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
8687 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8688#else
8689 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8690#endif
8691#ifdef VBOX_STRICT
8692 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8693#endif
8694 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
8695
8696 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8697 uint32_t uExitReason;
8698 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8699 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8700 AssertRC(rc);
8701 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
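    /* The VM-entry interruption-information valid bit is expected to be cleared by the CPU
       on a successful VM-entry; if it is still set here we treat the VM-entry as failed. */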
8702 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
8703
8704 if (rcVMRun == VINF_SUCCESS)
8705 {
8706 /*
8707 * Update the VM-exit history array here even if the VM-entry failed due to:
8708 * - Invalid guest state.
8709 * - MSR loading.
8710 * - Machine-check event.
8711 *
8712 * In any of the above cases we will still have a "valid" VM-exit reason
8713         * despite @a fVMEntryFailed being true.
8714 *
8715 * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".
8716 *
8717 * Note! We don't have CS or RIP at this point. Will probably address that later
8718 * by amending the history entry added here.
8719 */
8720 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
8721 UINT64_MAX, uHostTsc);
8722
8723 if (!pVmxTransient->fVMEntryFailed)
8724 {
8725 VMMRZCallRing3Enable(pVCpu);
8726
8727 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
8728 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
8729
8730#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8731 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
8732 AssertRC(rc);
8733#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8734 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS);
8735 AssertRC(rc);
8736#else
8737 /*
8738             * Always import the guest-interruptibility state as we need it while evaluating
8739             * event injection on re-entry.
8740             *
8741             * We don't import CR0 (when Unrestricted guest execution is unavailable) even though
8742             * we check for real-mode while exporting the state, because all bits that cause
8743             * mode changes wrt CR0 are intercepted.
8744 */
8745 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
8746 AssertRC(rc);
8747#endif
8748
8749 /*
8750 * Sync the TPR shadow with our APIC state.
8751 */
8752 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8753 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR])
8754 {
8755 rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]);
8756 AssertRC(rc);
8757 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8758 }
8759
8760 return;
8761 }
8762 }
8763 else
8764 {
8765 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
8766 }
8767
8768 VMMRZCallRing3Enable(pVCpu);
8769}
8770
8771
8772/**
8773 * Runs the guest code using VT-x the normal way.
8774 *
8775 * @returns VBox status code.
8776 * @param pVCpu The cross context virtual CPU structure.
8777 * @param pCtx Pointer to the guest-CPU context.
8778 *
8779 * @note Mostly the same as hmR0VmxRunGuestCodeDebug().
8780 */
8781static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx)
8782{
8783 VMXTRANSIENT VmxTransient;
8784 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8785 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8786 uint32_t cLoops = 0;
8787
8788 for (;; cLoops++)
8789 {
8790 Assert(!HMR0SuspendPending());
8791 HMVMX_ASSERT_CPU_SAFE();
8792
8793 /* Preparatory work for running guest code, this may force us to return
8794 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8795 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8796 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8797 if (rcStrict != VINF_SUCCESS)
8798 break;
8799
8800 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
8801 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
8802 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8803
8804 /* Restore any residual host-state and save any bits shared between host
8805 and guest into the guest-CPU state. Re-enables interrupts! */
8806 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
8807
8808 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8809 if (RT_SUCCESS(rcRun))
8810 { /* very likely */ }
8811 else
8812 {
8813 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
8814 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
8815 return rcRun;
8816 }
8817
8818 /* Profile the VM-exit. */
8819 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8820 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8821 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8822 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
8823 HMVMX_START_EXIT_DISPATCH_PROF();
8824
8825 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8826
8827 /* Handle the VM-exit. */
8828#ifdef HMVMX_USE_FUNCTION_TABLE
8829 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8830#else
8831 rcStrict = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8832#endif
8833 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
8834 if (rcStrict == VINF_SUCCESS)
8835 {
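            /* Stay in the inner loop while we're below the max resume loop count; otherwise
               break out with VINF_EM_RAW_INTERRUPT to force a trip through the outer loops. */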
8836 if (cLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
8837 continue; /* likely */
8838 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
8839 rcStrict = VINF_EM_RAW_INTERRUPT;
8840 }
8841 break;
8842 }
8843
8844 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8845 return rcStrict;
8846}
8847
8848
8849
8850/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
8851 * probes.
8852 *
8853 * The following few functions and associated structure contain the bloat
8854 * necessary for providing detailed debug events and dtrace probes as well as
8855 * reliable host side single stepping. This works on the principle of
8856 * "subclassing" the normal execution loop and workers. We replace the loop
8857 * method completely and override selected helpers to add necessary adjustments
8858 * to their core operation.
8859 *
8860 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
8861 * any performance for debug and analysis features.
8862 *
8863 * @{
8864 */
8865
8866/**
8867 * Transient per-VCPU debug state of VMCS and related info we save/restore in
8868 * the debug run loop.
8869 */
8870typedef struct VMXRUNDBGSTATE
8871{
8872 /** The RIP we started executing at. This is for detecting that we stepped. */
8873 uint64_t uRipStart;
8874 /** The CS we started executing with. */
8875 uint16_t uCsStart;
8876
8877 /** Whether we've actually modified the 1st execution control field. */
8878 bool fModifiedProcCtls : 1;
8879 /** Whether we've actually modified the 2nd execution control field. */
8880 bool fModifiedProcCtls2 : 1;
8881 /** Whether we've actually modified the exception bitmap. */
8882 bool fModifiedXcptBitmap : 1;
8883
8884    /** Whether we desire the CR0 guest/host mask to be cleared. */
8885 bool fClearCr0Mask : 1;
8886    /** Whether we desire the CR4 guest/host mask to be cleared. */
8887 bool fClearCr4Mask : 1;
8888 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
8889 uint32_t fCpe1Extra;
8890 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
8891 uint32_t fCpe1Unwanted;
8892 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
8893 uint32_t fCpe2Extra;
8894 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
8895 uint32_t bmXcptExtra;
8896 /** The sequence number of the Dtrace provider settings the state was
8897 * configured against. */
8898 uint32_t uDtraceSettingsSeqNo;
8899 /** VM-exits to check (one bit per VM-exit). */
8900 uint32_t bmExitsToCheck[3];
8901
8902 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
8903 uint32_t fProcCtlsInitial;
8904 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
8905 uint32_t fProcCtls2Initial;
8906 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
8907 uint32_t bmXcptInitial;
8908} VMXRUNDBGSTATE;
8909AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
8910typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
8911
8912
8913/**
8914 * Initializes the VMXRUNDBGSTATE structure.
8915 *
8916 * @param pVCpu The cross context virtual CPU structure of the
8917 * calling EMT.
8918 * @param pCtx The CPU register context to go with @a pVCpu.
8919 * @param pDbgState The structure to initialize.
8920 */
8921static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
8922{
8923 pDbgState->uRipStart = pCtx->rip;
8924 pDbgState->uCsStart = pCtx->cs.Sel;
8925
8926 pDbgState->fModifiedProcCtls = false;
8927 pDbgState->fModifiedProcCtls2 = false;
8928 pDbgState->fModifiedXcptBitmap = false;
8929 pDbgState->fClearCr0Mask = false;
8930 pDbgState->fClearCr4Mask = false;
8931 pDbgState->fCpe1Extra = 0;
8932 pDbgState->fCpe1Unwanted = 0;
8933 pDbgState->fCpe2Extra = 0;
8934 pDbgState->bmXcptExtra = 0;
8935 pDbgState->fProcCtlsInitial = pVCpu->hm.s.vmx.u32ProcCtls;
8936 pDbgState->fProcCtls2Initial = pVCpu->hm.s.vmx.u32ProcCtls2;
8937 pDbgState->bmXcptInitial = pVCpu->hm.s.vmx.u32XcptBitmap;
8938}
8939
8940
8941/**
8942 * Updates the VMCS fields with changes requested by @a pDbgState.
8943 *
8944 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
8945 * immediately before executing guest code, i.e. when interrupts are disabled.
8946 * We don't check status codes here as we cannot easily assert or return in the
8947 * latter case.
8948 *
8949 * @param pVCpu The cross context virtual CPU structure.
8950 * @param pDbgState The debug state.
8951 */
8952static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
8953{
8954 /*
8955 * Ensure desired flags in VMCS control fields are set.
8956 * (Ignoring write failure here, as we're committed and it's just debug extras.)
8957 *
8958 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
8959 * there should be no stale data in pCtx at this point.
8960 */
8961 if ( (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
8962 || (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Unwanted))
8963 {
8964 pVCpu->hm.s.vmx.u32ProcCtls |= pDbgState->fCpe1Extra;
8965 pVCpu->hm.s.vmx.u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
8966 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8967 Log6(("hmR0VmxRunDebugStateRevert: VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls));
8968 pDbgState->fModifiedProcCtls = true;
8969 }
8970
8971 if ((pVCpu->hm.s.vmx.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
8972 {
8973 pVCpu->hm.s.vmx.u32ProcCtls2 |= pDbgState->fCpe2Extra;
8974 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.u32ProcCtls2);
8975 Log6(("hmR0VmxRunDebugStateRevert: VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls2));
8976 pDbgState->fModifiedProcCtls2 = true;
8977 }
8978
8979 if ((pVCpu->hm.s.vmx.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
8980 {
8981 pVCpu->hm.s.vmx.u32XcptBitmap |= pDbgState->bmXcptExtra;
8982 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8983 Log6(("hmR0VmxRunDebugStateRevert: VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVCpu->hm.s.vmx.u32XcptBitmap));
8984 pDbgState->fModifiedXcptBitmap = true;
8985 }
8986
8987 if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.u32Cr0Mask != 0)
8988 {
8989 pVCpu->hm.s.vmx.u32Cr0Mask = 0;
8990 VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
8991 Log6(("hmR0VmxRunDebugStateRevert: VMX_VMCS_CTRL_CR0_MASK: 0\n"));
8992 }
8993
8994 if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.u32Cr4Mask != 0)
8995 {
8996 pVCpu->hm.s.vmx.u32Cr4Mask = 0;
8997 VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
8998 Log6(("hmR0VmxRunDebugStateRevert: VMX_VMCS_CTRL_CR4_MASK: 0\n"));
8999 }
9000}
9001
9002
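/**
 * Restores the VMCS control fields that hmR0VmxPreRunGuestDebugStateApply may have
 * modified, so the regular run loop sees the values it expects the next time around.
 *
 * @returns @a rcStrict on success, or a VMCS-write failure status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDbgState   The debug state holding the initial control field values.
 * @param   rcStrict    The status code to pass through on success.
 */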
9003static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
9004{
9005 /*
9006 * Restore VM-exit control settings as we may not reenter this function the
9007 * next time around.
9008 */
9009    /* We reload the initial value and trigger whatever recalculations we can the
9010       next time around. From the looks of things, that's all that's required atm. */
9011 if (pDbgState->fModifiedProcCtls)
9012 {
9013 if (!(pDbgState->fProcCtlsInitial & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
9014 pDbgState->fProcCtlsInitial |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
9015 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
9016 AssertRCReturn(rc2, rc2);
9017 pVCpu->hm.s.vmx.u32ProcCtls = pDbgState->fProcCtlsInitial;
9018 }
9019
9020 /* We're currently the only ones messing with this one, so just restore the
9021 cached value and reload the field. */
9022 if ( pDbgState->fModifiedProcCtls2
9023 && pVCpu->hm.s.vmx.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
9024 {
9025 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
9026 AssertRCReturn(rc2, rc2);
9027 pVCpu->hm.s.vmx.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
9028 }
9029
9030 /* If we've modified the exception bitmap, we restore it and trigger
9031 reloading and partial recalculation the next time around. */
9032 if (pDbgState->fModifiedXcptBitmap)
9033 pVCpu->hm.s.vmx.u32XcptBitmap = pDbgState->bmXcptInitial;
9034
9035 return rcStrict;
9036}
9037
9038
9039/**
9040 * Configures VM-exit controls for current DBGF and DTrace settings.
9041 *
9042 * This updates @a pDbgState and the VMCS execution control fields to reflect
9043 * the necessary VM-exits demanded by DBGF and DTrace.
9044 *
9045 * @param pVCpu The cross context virtual CPU structure.
9046 * @param pDbgState The debug state.
9047 * @param pVmxTransient Pointer to the VMX transient structure. May update
9048 * fUpdateTscOffsettingAndPreemptTimer.
9049 */
9050static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient)
9051{
9052 /*
9053 * Take down the dtrace serial number so we can spot changes.
9054 */
9055 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
9056 ASMCompilerBarrier();
9057
9058 /*
9059 * We'll rebuild most of the middle block of data members (holding the
9060 * current settings) as we go along here, so start by clearing it all.
9061 */
9062 pDbgState->bmXcptExtra = 0;
9063 pDbgState->fCpe1Extra = 0;
9064 pDbgState->fCpe1Unwanted = 0;
9065 pDbgState->fCpe2Extra = 0;
9066 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
9067 pDbgState->bmExitsToCheck[i] = 0;
9068
9069 /*
9070 * Software interrupts (INT XXh) - no idea how to trigger these...
9071 */
9072 PVM pVM = pVCpu->CTX_SUFF(pVM);
9073 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
9074 || VBOXVMM_INT_SOFTWARE_ENABLED())
9075 {
9076 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9077 }
9078
9079 /*
9080 * INT3 breakpoints - triggered by #BP exceptions.
9081 */
9082 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
9083 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
9084
9085 /*
9086 * Exception bitmap and XCPT events+probes.
9087 */
9088 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
9089 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
9090 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
9091
9092 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
9093 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
9094 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
9095 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
9096 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
9097 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
9098 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
9099 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
9100 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
9101 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
9102 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
9103 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
9104 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
9105 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
9106 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
9107 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
9108 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
9109 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
9110
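    /* Exceptions are reported via the exception-or-NMI VM-exit, so if we added anything to
       the exception bitmap we must check that exit as well. */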
9111 if (pDbgState->bmXcptExtra)
9112 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9113
9114 /*
9115 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
9116 *
9117 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
9118 * So, when adding/changing/removing please don't forget to update it.
9119 *
9120     * Some of the macros are picking up local variables to save horizontal space
9121     * (being able to see it in a table is the lesser evil here).
9122 */
9123#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
9124 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
9125 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
9126#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
9127 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9128 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9129 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9130 } else do { } while (0)
9131#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
9132 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9133 { \
9134 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
9135 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9136 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9137 } else do { } while (0)
9138#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
9139 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9140 { \
9141 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
9142 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9143 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9144 } else do { } while (0)
9145#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
9146 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9147 { \
9148 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
9149 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9150 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9151 } else do { } while (0)
9152
9153 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
9154 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
9155 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
9156 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
9157 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
9158
9159 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
9160 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
9161 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
9162 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
9163 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT); /* paranoia */
9164 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
9165 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
9166 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
9167 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
9168 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
9169 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT);
9170 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
9171 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
9172 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
9173 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
9174 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
9175 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
9176 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
9177 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
9178 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
9179 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
9180 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
9181 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
9182 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
9183 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
9184 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
9185 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
9186 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
9187 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
9188 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
9189 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
9190 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
9191 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
9192 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
9193 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
9194 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
9195
9196 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
9197 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9198 {
9199 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_APIC_TPR);
9200 AssertRC(rc);
9201
9202#if 0 /** @todo fix me */
9203 pDbgState->fClearCr0Mask = true;
9204 pDbgState->fClearCr4Mask = true;
9205#endif
9206 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
9207 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT;
9208 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9209 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;
9210 pDbgState->fCpe1Unwanted |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* risky? */
9211 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
9212 require clearing here and in the loop if we start using it. */
9213 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
9214 }
9215 else
9216 {
9217 if (pDbgState->fClearCr0Mask)
9218 {
9219 pDbgState->fClearCr0Mask = false;
9220 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
9221 }
9222 if (pDbgState->fClearCr4Mask)
9223 {
9224 pDbgState->fClearCr4Mask = false;
9225 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
9226 }
9227 }
9228 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
9229 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
9230
9231 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
9232 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
9233 {
9234 /** @todo later, need to fix handler as it assumes this won't usually happen. */
9235 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
9236 }
9237 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
9238 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
9239
9240 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS); /* risky clearing this? */
9241 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
9242 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
9243 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
9244 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT); /* paranoia */
9245 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
9246 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT); /* paranoia */
9247 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
9248#if 0 /** @todo too slow, fix handler. */
9249 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
9250#endif
9251 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
9252
9253 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
9254 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
9255 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
9256 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
9257 {
9258 pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
9259 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XDTR_ACCESS);
9260 }
9261 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_XDTR_ACCESS);
9262 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_XDTR_ACCESS);
9263 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_XDTR_ACCESS);
9264 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_XDTR_ACCESS);
9265
9266 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
9267 || IS_EITHER_ENABLED(pVM, INSTR_STR)
9268 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
9269 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
9270 {
9271 pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
9272 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_TR_ACCESS);
9273 }
9274 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_TR_ACCESS);
9275 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_TR_ACCESS);
9276 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_TR_ACCESS);
9277 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_TR_ACCESS);
9278
9279 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
9280 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
9281 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
9282 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
9283 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
9284 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
9285 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
9286 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
9287 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
9288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
9289 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
9290 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
9291 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
9292 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
9293 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
9294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
9295 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT);
9296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
9297 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
9298    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES,                 VMX_EXIT_XSAVES);
9299 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
9300 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
9301
9302#undef IS_EITHER_ENABLED
9303#undef SET_ONLY_XBM_IF_EITHER_EN
9304#undef SET_CPE1_XBM_IF_EITHER_EN
9305#undef SET_CPEU_XBM_IF_EITHER_EN
9306#undef SET_CPE2_XBM_IF_EITHER_EN
9307
9308 /*
9309 * Sanitize the control stuff.
9310 */
9311 pDbgState->fCpe2Extra &= pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
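    /* Secondary execution controls only take effect when the activate-secondary-controls
       bit is set in the primary processor-based controls. */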
9312 if (pDbgState->fCpe2Extra)
9313 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
9314 pDbgState->fCpe1Extra &= pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
9315 pDbgState->fCpe1Unwanted &= ~pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
9316 if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
9317 {
9318 pVCpu->hm.s.fDebugWantRdTscExit ^= true;
9319 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9320 }
9321
9322 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
9323 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
9324 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
9325 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
9326}
9327
9328
9329/**
9330 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
9331 * appropriate.
9332 *
9333 * The caller has already checked the VM-exit against the
9334 * VMXRUNDBGSTATE::bmExitsToCheck bitmap and has checked for NMIs, so we
9335 * don't have to do either here.
9336 *
9337 * @returns Strict VBox status code (i.e. informational status codes too).
9338 * @param pVCpu The cross context virtual CPU structure.
9339 * @param pMixedCtx Pointer to the guest-CPU context.
9340 * @param pVmxTransient Pointer to the VMX-transient structure.
9341 * @param uExitReason The VM-exit reason.
9342 *
9343 * @remarks The name of this function is displayed by dtrace, so keep it short
9344 * and to the point. No longer than 33 chars, please.
9345 */
9346static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9347 uint32_t uExitReason)
9348{
9349 /*
9350 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
9351 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
9352 *
9353 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
9354     * does. Must add/change/remove in both places. Same ordering, please.
9355 *
9356 * Added/removed events must also be reflected in the next section
9357 * where we dispatch dtrace events.
9358 */
9359 bool fDtrace1 = false;
9360 bool fDtrace2 = false;
9361 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
9362 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
9363 uint32_t uEventArg = 0;
9364#define SET_EXIT(a_EventSubName) \
9365 do { \
9366 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9367 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9368 } while (0)
9369#define SET_BOTH(a_EventSubName) \
9370 do { \
9371 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
9372 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9373 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
9374 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9375 } while (0)
9376 switch (uExitReason)
9377 {
9378 case VMX_EXIT_MTF:
9379 return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
9380
9381 case VMX_EXIT_XCPT_OR_NMI:
9382 {
9383 uint8_t const idxVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
9384 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo))
9385 {
9386 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
9387 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT:
9388 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT:
9389 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
9390 {
9391 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uExitIntInfo))
9392 {
9393 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9394 uEventArg = pVmxTransient->uExitIntErrorCode;
9395 }
9396 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
9397 switch (enmEvent1)
9398 {
9399 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
9400 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
9401 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
9402 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
9403 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
9404 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
9405 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
9406 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
9407 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
9408 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
9409 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
9410 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
9411 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
9412 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
9413 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
9414 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
9415 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
9416 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
9417 default: break;
9418 }
9419 }
9420 else
9421 AssertFailed();
9422 break;
9423
9424 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT:
9425 uEventArg = idxVector;
9426 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
9427 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
9428 break;
9429 }
9430 break;
9431 }
9432
9433 case VMX_EXIT_TRIPLE_FAULT:
9434 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
9435 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
9436 break;
9437 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
9438 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
9439 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
9440 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
9441 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
9442
9443 /* Instruction specific VM-exits: */
9444 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
9445 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
9446 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
9447 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
9448 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
9449 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
9450 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
9451 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
9452 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
9453 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
9454 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
9455 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
9456 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
9457 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
9458 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
9459 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
9460 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
9461 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
9462 case VMX_EXIT_MOV_CRX:
9463 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9464 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
9465 SET_BOTH(CRX_READ);
9466 else
9467 SET_BOTH(CRX_WRITE);
9468 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification);
9469 break;
9470 case VMX_EXIT_MOV_DRX:
9471 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9472 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification)
9473 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
9474 SET_BOTH(DRX_READ);
9475 else
9476 SET_BOTH(DRX_WRITE);
9477 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification);
9478 break;
9479 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
9480 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
9481 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
9482 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
9483 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
9484 case VMX_EXIT_XDTR_ACCESS:
9485 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9486 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_XDTR_INSINFO_INSTR_ID))
9487 {
9488 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
9489 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
9490 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
9491 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
9492 }
9493 break;
9494
9495 case VMX_EXIT_TR_ACCESS:
9496 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9497 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_YYTR_INSINFO_INSTR_ID))
9498 {
9499 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
9500 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
9501 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
9502 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
9503 }
9504 break;
9505
9506 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
9507 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
9508 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
9509 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
9510 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
9511 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
9512 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
9513 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
9514 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
9515 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
9516 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
9517
9518 /* Events that aren't relevant at this point. */
9519 case VMX_EXIT_EXT_INT:
9520 case VMX_EXIT_INT_WINDOW:
9521 case VMX_EXIT_NMI_WINDOW:
9522 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9523 case VMX_EXIT_PREEMPT_TIMER:
9524 case VMX_EXIT_IO_INSTR:
9525 break;
9526
9527 /* Errors and unexpected events. */
9528 case VMX_EXIT_INIT_SIGNAL:
9529 case VMX_EXIT_SIPI:
9530 case VMX_EXIT_IO_SMI:
9531 case VMX_EXIT_SMI:
9532 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9533 case VMX_EXIT_ERR_MSR_LOAD:
9534 case VMX_EXIT_ERR_MACHINE_CHECK:
9535 break;
9536
9537 default:
9538 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
9539 break;
9540 }
9541#undef SET_BOTH
9542#undef SET_EXIT
9543
9544 /*
9545     * Dtrace tracepoints go first. We do them here at once so we don't
9546     * have to duplicate the guest-state saving and related code a few dozen times.
9547     * The downside is that we've got to repeat the switch, though this time
9548     * we use enmEvent since the probes are a subset of what DBGF does.
9549 */
9550 if (fDtrace1 || fDtrace2)
9551 {
9552 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9553 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
9554 switch (enmEvent1)
9555 {
9556 /** @todo consider which extra parameters would be helpful for each probe. */
9557 case DBGFEVENT_END: break;
9558 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pMixedCtx); break;
9559 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pMixedCtx, pMixedCtx->dr[6]); break;
9560 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pMixedCtx); break;
9561 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pMixedCtx); break;
9562 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pMixedCtx); break;
9563 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pMixedCtx); break;
9564 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pMixedCtx); break;
9565 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pMixedCtx); break;
9566 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pMixedCtx, uEventArg); break;
9567 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pMixedCtx, uEventArg); break;
9568 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pMixedCtx, uEventArg); break;
9569 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pMixedCtx, uEventArg); break;
9570 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pMixedCtx, uEventArg, pMixedCtx->cr2); break;
9571 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pMixedCtx); break;
9572 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pMixedCtx); break;
9573 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pMixedCtx); break;
9574 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pMixedCtx); break;
9575 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pMixedCtx, uEventArg); break;
9576 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9577 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
9578 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pMixedCtx); break;
9579 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pMixedCtx); break;
9580 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pMixedCtx); break;
9581 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pMixedCtx); break;
9582 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pMixedCtx); break;
9583 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pMixedCtx); break;
9584 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pMixedCtx); break;
9585 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9586 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9587 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9588 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9589 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
9590 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
9591 RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
9592 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pMixedCtx); break;
9593 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pMixedCtx); break;
9594 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pMixedCtx); break;
9595 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pMixedCtx); break;
9596 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pMixedCtx); break;
9597 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pMixedCtx); break;
9598 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pMixedCtx); break;
9599 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pMixedCtx); break;
9600 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pMixedCtx); break;
9601 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pMixedCtx); break;
9602 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pMixedCtx); break;
9603 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pMixedCtx); break;
9604 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pMixedCtx); break;
9605 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pMixedCtx); break;
9606 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pMixedCtx); break;
9607 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pMixedCtx); break;
9608 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pMixedCtx); break;
9609 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pMixedCtx); break;
9610 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pMixedCtx); break;
9611 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
9612 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
9613 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
9614 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pMixedCtx); break;
9615 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pMixedCtx); break;
9616 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pMixedCtx); break;
9617 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pMixedCtx); break;
9618 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pMixedCtx); break;
9619 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pMixedCtx); break;
9620 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pMixedCtx); break;
9621 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pMixedCtx); break;
9622 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pMixedCtx); break;
9623 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pMixedCtx); break;
9624 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
9625 }
9626 switch (enmEvent2)
9627 {
9628 /** @todo consider which extra parameters would be helpful for each probe. */
9629 case DBGFEVENT_END: break;
9630 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pMixedCtx); break;
9631 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
9632 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pMixedCtx); break;
9633 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pMixedCtx); break;
9634 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pMixedCtx); break;
9635 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pMixedCtx); break;
9636 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pMixedCtx); break;
9637 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pMixedCtx); break;
9638 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pMixedCtx); break;
9639 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9640 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9641 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9642 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9643 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
9644 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
9645 RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
9646 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pMixedCtx); break;
9647 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pMixedCtx); break;
9648 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pMixedCtx); break;
9649 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pMixedCtx); break;
9650 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pMixedCtx); break;
9651 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pMixedCtx); break;
9652 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pMixedCtx); break;
9653 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pMixedCtx); break;
9654 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pMixedCtx); break;
9655 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pMixedCtx); break;
9656 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pMixedCtx); break;
9657 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pMixedCtx); break;
9658 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pMixedCtx); break;
9659 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pMixedCtx); break;
9660 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pMixedCtx); break;
9661 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pMixedCtx); break;
9662 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pMixedCtx); break;
9663 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pMixedCtx); break;
9664 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pMixedCtx); break;
9665 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
9666 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
9667 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
9668 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pMixedCtx); break;
9669 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pMixedCtx); break;
9670 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pMixedCtx); break;
9671 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pMixedCtx); break;
9672 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pMixedCtx); break;
9673 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pMixedCtx); break;
9674 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pMixedCtx); break;
9675 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pMixedCtx); break;
9676 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pMixedCtx); break;
9677 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pMixedCtx); break;
9678 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pMixedCtx); break;
9679 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pMixedCtx); break;
9680 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pMixedCtx); break;
9681 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pMixedCtx); break;
9682 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
9683 }
9684 }
9685
9686 /*
9687     * Fire off the DBGF event, if enabled (our check here is just a quick one,
9688 * the DBGF call will do a full check).
9689 *
9690 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
9691     * Note! If we have two events, we prioritize the first, i.e. the instruction
9692 * one, in order to avoid event nesting.
9693 */
9694 PVM pVM = pVCpu->CTX_SUFF(pVM);
9695 if ( enmEvent1 != DBGFEVENT_END
9696 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
9697 {
9698 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArg(pVM, pVCpu, enmEvent1, uEventArg, DBGFEVENTCTX_HM);
9699 if (rcStrict != VINF_SUCCESS)
9700 return rcStrict;
9701 }
9702 else if ( enmEvent2 != DBGFEVENT_END
9703 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
9704 {
9705 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArg(pVM, pVCpu, enmEvent2, uEventArg, DBGFEVENTCTX_HM);
9706 if (rcStrict != VINF_SUCCESS)
9707 return rcStrict;
9708 }
9709
9710 return VINF_SUCCESS;
9711}
9712
9713
9714/**
9715 * Single-stepping VM-exit filtering.
9716 *
9717 * This is preprocessing the VM-exits and deciding whether we've gotten far
9718 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
9719 * handling is performed.
9720 *
9721 * @returns Strict VBox status code (i.e. informational status codes too).
9722 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9723 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9724 * out-of-sync. Make sure to update the required
9725 * fields before using them.
9726 * @param pVmxTransient Pointer to the VMX-transient structure.
9727 * @param uExitReason The VM-exit reason.
9728 * @param pDbgState The debug state.
9729 */
9730DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9731 uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState)
9732{
9733 /*
9734 * Expensive (saves context) generic dtrace VM-exit probe.
9735 */
9736 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
9737 { /* more likely */ }
9738 else
9739 {
9740 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9741 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
9742 AssertRC(rc);
9743 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
9744 }
9745
9746 /*
9747 * Check for host NMI, just to get that out of the way.
9748 */
9749 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
9750 { /* normally likely */ }
9751 else
9752 {
9753 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9754 AssertRCReturn(rc2, rc2);
9755 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9756 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9757 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9758 }
9759
9760 /*
9761     * Check for a single-stepping event if we're stepping.
9762 */
9763 if (pVCpu->hm.s.fSingleInstruction)
9764 {
9765 switch (uExitReason)
9766 {
9767 case VMX_EXIT_MTF:
9768 return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
9769
9770 /* Various events: */
9771 case VMX_EXIT_XCPT_OR_NMI:
9772 case VMX_EXIT_EXT_INT:
9773 case VMX_EXIT_TRIPLE_FAULT:
9774 case VMX_EXIT_INT_WINDOW:
9775 case VMX_EXIT_NMI_WINDOW:
9776 case VMX_EXIT_TASK_SWITCH:
9777 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9778 case VMX_EXIT_APIC_ACCESS:
9779 case VMX_EXIT_EPT_VIOLATION:
9780 case VMX_EXIT_EPT_MISCONFIG:
9781 case VMX_EXIT_PREEMPT_TIMER:
9782
9783 /* Instruction specific VM-exits: */
9784 case VMX_EXIT_CPUID:
9785 case VMX_EXIT_GETSEC:
9786 case VMX_EXIT_HLT:
9787 case VMX_EXIT_INVD:
9788 case VMX_EXIT_INVLPG:
9789 case VMX_EXIT_RDPMC:
9790 case VMX_EXIT_RDTSC:
9791 case VMX_EXIT_RSM:
9792 case VMX_EXIT_VMCALL:
9793 case VMX_EXIT_VMCLEAR:
9794 case VMX_EXIT_VMLAUNCH:
9795 case VMX_EXIT_VMPTRLD:
9796 case VMX_EXIT_VMPTRST:
9797 case VMX_EXIT_VMREAD:
9798 case VMX_EXIT_VMRESUME:
9799 case VMX_EXIT_VMWRITE:
9800 case VMX_EXIT_VMXOFF:
9801 case VMX_EXIT_VMXON:
9802 case VMX_EXIT_MOV_CRX:
9803 case VMX_EXIT_MOV_DRX:
9804 case VMX_EXIT_IO_INSTR:
9805 case VMX_EXIT_RDMSR:
9806 case VMX_EXIT_WRMSR:
9807 case VMX_EXIT_MWAIT:
9808 case VMX_EXIT_MONITOR:
9809 case VMX_EXIT_PAUSE:
9810 case VMX_EXIT_XDTR_ACCESS:
9811 case VMX_EXIT_TR_ACCESS:
9812 case VMX_EXIT_INVEPT:
9813 case VMX_EXIT_RDTSCP:
9814 case VMX_EXIT_INVVPID:
9815 case VMX_EXIT_WBINVD:
9816 case VMX_EXIT_XSETBV:
9817 case VMX_EXIT_RDRAND:
9818 case VMX_EXIT_INVPCID:
9819 case VMX_EXIT_VMFUNC:
9820 case VMX_EXIT_RDSEED:
9821 case VMX_EXIT_XSAVES:
9822 case VMX_EXIT_XRSTORS:
9823 {
9824 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9825 AssertRCReturn(rc, rc);
9826 if ( pMixedCtx->rip != pDbgState->uRipStart
9827 || pMixedCtx->cs.Sel != pDbgState->uCsStart)
9828 return VINF_EM_DBG_STEPPED;
9829 break;
9830 }
9831
9832 /* Errors and unexpected events: */
9833 case VMX_EXIT_INIT_SIGNAL:
9834 case VMX_EXIT_SIPI:
9835 case VMX_EXIT_IO_SMI:
9836 case VMX_EXIT_SMI:
9837 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9838 case VMX_EXIT_ERR_MSR_LOAD:
9839 case VMX_EXIT_ERR_MACHINE_CHECK:
9840 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
9841 break;
9842
9843 default:
9844 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
9845 break;
9846 }
9847 }
9848
9849 /*
9850 * Check for debugger event breakpoints and dtrace probes.
9851 */
9852 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
9853 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
9854 {
9855 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9856 if (rcStrict != VINF_SUCCESS)
9857 return rcStrict;
9858 }
9859
9860 /*
9861 * Normal processing.
9862 */
9863#ifdef HMVMX_USE_FUNCTION_TABLE
9864 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9865#else
9866 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9867#endif
9868}
9869
9870
9871/**
9872 * Single steps guest code using VT-x.
9873 *
9874 * @returns Strict VBox status code (i.e. informational status codes too).
9875 * @param pVCpu The cross context virtual CPU structure.
9876 * @param pCtx Pointer to the guest-CPU context.
9877 *
9878 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
9879 */
9880static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, PCPUMCTX pCtx)
9881{
9882 VMXTRANSIENT VmxTransient;
9883 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
9884
9885 /* Set HMCPU indicators. */
9886 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
9887 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
9888 pVCpu->hm.s.fDebugWantRdTscExit = false;
9889 pVCpu->hm.s.fUsingDebugLoop = true;
9890
9891 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
9892 VMXRUNDBGSTATE DbgState;
9893 hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);
9894 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
9895
9896 /*
9897 * The loop.
9898 */
9899 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
9900 for (uint32_t cLoops = 0; ; cLoops++)
9901 {
9902 Assert(!HMR0SuspendPending());
9903 HMVMX_ASSERT_CPU_SAFE();
9904 bool fStepping = pVCpu->hm.s.fSingleInstruction;
9905
9906 /*
9907 * Preparatory work for running guest code, this may force us to return
9908 * to ring-3. This bugger disables interrupts on VINF_SUCCESS!
9909 */
9910 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
9911      hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState);      /* Set up execute controls that the next two calls can respond to. */
9912 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, fStepping);
9913 if (rcStrict != VINF_SUCCESS)
9914 break;
9915
9916 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
9917 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
9918
9919 /*
9920 * Now we can run the guest code.
9921 */
9922 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
9923
9924 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
9925
9926 /*
9927 * Restore any residual host-state and save any bits shared between host
9928 * and guest into the guest-CPU state. Re-enables interrupts!
9929 */
9930 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
9931
9932 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
9933 if (RT_SUCCESS(rcRun))
9934 { /* very likely */ }
9935 else
9936 {
9937 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
9938 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
9939 return rcRun;
9940 }
9941
9942 /* Profile the VM-exit. */
9943 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
9944 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
9945 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
9946 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
9947 HMVMX_START_EXIT_DISPATCH_PROF();
9948
9949 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
9950
9951 /*
9952 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitDebug().
9953 */
9954 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);
9955 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
9956 if (rcStrict != VINF_SUCCESS)
9957 break;
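        /* Cap the number of back-to-back VM-exits we handle in ring-0 before forcing a return to ring-3. */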
9958 if (cLoops > pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
9959 {
9960 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9961 rcStrict = VINF_EM_RAW_INTERRUPT;
9962 break;
9963 }
9964
9965 /*
9966         * Stepping: Did the RIP change? If so, consider it a single step.
9967 * Otherwise, make sure one of the TFs gets set.
9968 */
9969 if (fStepping)
9970 {
9971 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
9972 AssertRC(rc);
9973 if ( pCtx->rip != DbgState.uRipStart
9974 || pCtx->cs.Sel != DbgState.uCsStart)
9975 {
9976 rcStrict = VINF_EM_DBG_STEPPED;
9977 break;
9978 }
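            /* Still on the same instruction: flag DR7 as dirty so the single-stepping setup is re-evaluated before the next VM-entry. */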
9979 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
9980 }
9981
9982 /*
9983         * Update when dtrace settings change (DBGF kicks us, so no need to check).
9984 */
9985 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
9986 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
9987 }
9988
9989 /*
9990 * Clear the X86_EFL_TF if necessary.
9991 */
9992 if (pVCpu->hm.s.fClearTrapFlag)
9993 {
9994 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
9995 AssertRC(rc);
9996 pVCpu->hm.s.fClearTrapFlag = false;
9997 pCtx->eflags.Bits.u1TF = 0;
9998 }
9999    /** @todo there seem to be issues with the resume flag when the monitor trap
10000 * flag is pending without being used. Seen early in bios init when
10001 * accessing APIC page in protected mode. */
10002
10003 /*
10004 * Restore VM-exit control settings as we may not reenter this function the
10005 * next time around.
10006 */
10007 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
10008
10009 /* Restore HMCPU indicators. */
10010 pVCpu->hm.s.fUsingDebugLoop = false;
10011 pVCpu->hm.s.fDebugWantRdTscExit = false;
10012 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
10013
10014 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
10015 return rcStrict;
10016}
10017
10018
10019/** @} */
10020
10021
10022/**
10023 * Checks if any expensive dtrace probes are enabled and we should go to the
10024 * debug loop.
10025 *
10026 * @returns true if we should use debug loop, false if not.
10027 */
10028static bool hmR0VmxAnyExpensiveProbesEnabled(void)
10029{
10030 /* It's probably faster to OR the raw 32-bit counter variables together.
10031 Since the variables are in an array and the probes are next to one
10032 another (more or less), we have good locality. So, better read
10033       eight or nine cache lines every time and only have one conditional, than
10034 128+ conditionals, right? */
10035 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
10036 | VBOXVMM_XCPT_DE_ENABLED_RAW()
10037 | VBOXVMM_XCPT_DB_ENABLED_RAW()
10038 | VBOXVMM_XCPT_BP_ENABLED_RAW()
10039 | VBOXVMM_XCPT_OF_ENABLED_RAW()
10040 | VBOXVMM_XCPT_BR_ENABLED_RAW()
10041 | VBOXVMM_XCPT_UD_ENABLED_RAW()
10042 | VBOXVMM_XCPT_NM_ENABLED_RAW()
10043 | VBOXVMM_XCPT_DF_ENABLED_RAW()
10044 | VBOXVMM_XCPT_TS_ENABLED_RAW()
10045 | VBOXVMM_XCPT_NP_ENABLED_RAW()
10046 | VBOXVMM_XCPT_SS_ENABLED_RAW()
10047 | VBOXVMM_XCPT_GP_ENABLED_RAW()
10048 | VBOXVMM_XCPT_PF_ENABLED_RAW()
10049 | VBOXVMM_XCPT_MF_ENABLED_RAW()
10050 | VBOXVMM_XCPT_AC_ENABLED_RAW()
10051 | VBOXVMM_XCPT_XF_ENABLED_RAW()
10052 | VBOXVMM_XCPT_VE_ENABLED_RAW()
10053 | VBOXVMM_XCPT_SX_ENABLED_RAW()
10054 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
10055 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
10056 ) != 0
10057 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
10058 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
10059 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
10060 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
10061 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
10062 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
10063 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
10064 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
10065 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
10066 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
10067 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
10068 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
10069 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
10070 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
10071 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
10072 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
10073 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
10074 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
10075 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
10076 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
10077 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
10078 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
10079 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
10080 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
10081 | VBOXVMM_INSTR_STR_ENABLED_RAW()
10082 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
10083 | VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
10084 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
10085 | VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
10086 | VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
10087 | VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
10088 | VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
10089 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
10090 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED_RAW()
10091 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED_RAW()
10092 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED_RAW()
10093 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED_RAW()
10094 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED_RAW()
10095 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED_RAW()
10096 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED_RAW()
10097 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED_RAW()
10098 | VBOXVMM_INSTR_VMX_VMXON_ENABLED_RAW()
10099 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED_RAW()
10100 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED_RAW()
10101 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED_RAW()
10102 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED_RAW()
10103 ) != 0
10104 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
10105 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
10106 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
10107 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
10108 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
10109 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
10110 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
10111 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
10112 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
10113 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
10114 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
10115 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
10116 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
10117 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
10118 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
10119 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
10120 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
10121 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
10122 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
10123 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
10124 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
10125 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
10126 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
10127 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
10128 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
10129 | VBOXVMM_EXIT_STR_ENABLED_RAW()
10130 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
10131 | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
10132 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
10133 | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
10134 | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
10135 | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
10136 | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
10137 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
10138 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
10139 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
10140 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
10141 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
10142 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
10143 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
10144 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
10145 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
10146 | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
10147 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
10148 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
10149 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
10150 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
10151 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
10152 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
10153 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
10154 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
10155 ) != 0;
10156}
10157
10158
10159/**
10160 * Runs the guest code using VT-x.
10161 *
10162 * @returns Strict VBox status code (i.e. informational status codes too).
10163 * @param pVCpu The cross context virtual CPU structure.
10164 */
10165VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu)
10166{
10167 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
10168 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10169 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
10170 HMVMX_ASSERT_PREEMPT_SAFE();
10171
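    /* Register the notification callback for longjmps/calls to ring-3 (hmR0VmxCallRing3Callback). */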
10172 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
10173
10174 VBOXSTRICTRC rcStrict;
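    /* Take the normal (fast) run loop unless the debug loop was requested, expensive dtrace probes
       are armed, we are single-stepping, or INT3 breakpoints are enabled. */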
10175 if ( !pVCpu->hm.s.fUseDebugLoop
10176 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
10177 && !DBGFIsStepping(pVCpu)
10178 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
10179 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, pCtx);
10180 else
10181 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, pCtx);
10182
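    /* Massage status codes for the caller: emulate rather than interpret, and report resets as triple faults. */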
10183 if (rcStrict == VERR_EM_INTERPRETER)
10184 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
10185 else if (rcStrict == VINF_EM_RESET)
10186 rcStrict = VINF_EM_TRIPLE_FAULT;
10187
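    /* Let hmR0VmxExitToRing3 do the necessary state syncing before we return to ring-3. */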
10188 int rc2 = hmR0VmxExitToRing3(pVCpu, pCtx, rcStrict);
10189 if (RT_FAILURE(rc2))
10190 {
10191 pVCpu->hm.s.u32HMError = (uint32_t)VBOXSTRICTRC_VAL(rcStrict);
10192 rcStrict = rc2;
10193 }
10194 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
10195 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
10196 return rcStrict;
10197}
10198
10199
10200#ifndef HMVMX_USE_FUNCTION_TABLE
10201DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
10202{
10203#ifdef DEBUG_ramshankar
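/* Debug variant: optionally (a_fSave != 0) import the entire guest state before calling the
   handler and mark all guest state as changed afterwards. */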
10204#define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
10205 do { \
10206 if (a_fSave != 0) \
10207 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \
10208 VBOXSTRICTRC rcStrict = a_CallExpr; \
10209 if (a_fSave != 0) \
10210 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
10211 return rcStrict; \
10212 } while (0)
10213#else
10214# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
10215#endif
10216 switch (rcReason)
10217 {
10218 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
10219 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
10220 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
10221 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
10222 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
10223 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
10224 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
10225 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
10226 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
10227 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
10228 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
10229 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
10230 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
10231 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
10232 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
10233 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
10234 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
10235 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
10236 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
10237 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
10238 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
10239 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
10240 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
10241 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
10242 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
10243 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
10244 case VMX_EXIT_XDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
10245 case VMX_EXIT_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
10246 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
10247 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
10248 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
10249 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
10250 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
10251 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
10252
10253 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient);
10254 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient);
10255 case VMX_EXIT_INIT_SIGNAL: return hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient);
10256 case VMX_EXIT_SIPI: return hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient);
10257 case VMX_EXIT_IO_SMI: return hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient);
10258 case VMX_EXIT_SMI: return hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient);
10259 case VMX_EXIT_ERR_MSR_LOAD: return hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient);
10260 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient);
10261 case VMX_EXIT_ERR_MACHINE_CHECK: return hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient);
10262
10263 case VMX_EXIT_VMCLEAR:
10264 case VMX_EXIT_VMLAUNCH:
10265 case VMX_EXIT_VMPTRLD:
10266 case VMX_EXIT_VMPTRST:
10267 case VMX_EXIT_VMREAD:
10268 case VMX_EXIT_VMRESUME:
10269 case VMX_EXIT_VMWRITE:
10270 case VMX_EXIT_VMXOFF:
10271 case VMX_EXIT_VMXON:
10272 case VMX_EXIT_INVEPT:
10273 case VMX_EXIT_INVVPID:
10274 case VMX_EXIT_VMFUNC:
10275 case VMX_EXIT_XSAVES:
10276 case VMX_EXIT_XRSTORS:
10277 return hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
10278
10279 case VMX_EXIT_ENCLS:
10280 case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */
10281 case VMX_EXIT_PML_FULL:
10282 default:
10283 return hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
10284 }
10285#undef VMEXIT_CALL_RET
10286}
10287#endif /* !HMVMX_USE_FUNCTION_TABLE */
10288
10289
10290#ifdef VBOX_STRICT
10291/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
10292# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
10293 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
10294
10295# define HMVMX_ASSERT_PREEMPT_CPUID() \
10296 do { \
10297 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
10298 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
10299 } while (0)
10300
10301# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
10302 do { \
10303 AssertPtr(pVCpu); \
10304 AssertPtr(pMixedCtx); \
10305 AssertPtr(pVmxTransient); \
10306 Assert(pVmxTransient->fVMEntryFailed == false); \
10307 Assert(ASMIntAreEnabled()); \
10308 HMVMX_ASSERT_PREEMPT_SAFE(); \
10309 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
10310 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
10311 HMVMX_ASSERT_PREEMPT_SAFE(); \
10312 if (VMMR0IsLogFlushDisabled(pVCpu)) \
10313 HMVMX_ASSERT_PREEMPT_CPUID(); \
10314 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10315 } while (0)
10316
10317# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
10318 do { \
10319 Log4Func(("\n")); \
10320 } while (0)
10321#else /* nonstrict builds: */
10322# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
10323 do { \
10324 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10325 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
10326 } while (0)
10327# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
10328#endif
10329
10330
10331/**
10332 * Advances the guest RIP by the specified number of bytes.
10333 *
10334 * @param pVCpu The cross context virtual CPU structure.
10335 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
10336 * out-of-sync. Make sure to update the required fields
10337 * before using them.
10338 * @param cbInstr Number of bytes to advance the RIP by.
10339 *
10340 * @remarks No-long-jump zone!!!
10341 */
10342DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
10343{
10344 /* Advance the RIP. */
10345 pMixedCtx->rip += cbInstr;
10346 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
10347
10348 /* Update interrupt inhibition. */
10349 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10350 && pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
10351 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
10352}
10353
10354
10355/**
10356 * Advances the guest RIP after reading it from the VMCS.
10357 *
10358 * @returns VBox status code, no informational status codes.
10359 * @param pVCpu The cross context virtual CPU structure.
10360 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
10361 * out-of-sync. Make sure to update the required fields
10362 * before using them.
10363 * @param pVmxTransient Pointer to the VMX transient structure.
10364 *
10365 * @remarks No-long-jump zone!!!
10366 */
10367static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10368{
10369 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10370 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
10371 AssertRCReturn(rc, rc);
10372
10373 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, pVmxTransient->cbInstr);
10374
10375 /*
10376 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
10377 * pending debug exception field as it takes care of priority of events.
10378 *
10379 * See Intel spec. 32.2.1 "Debug Exceptions".
10380 */
10381 if ( !pVCpu->hm.s.fSingleInstruction
10382 && pMixedCtx->eflags.Bits.u1TF)
10383 {
10384 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
10385 AssertRCReturn(rc, rc);
10386 }
10387
10388 return VINF_SUCCESS;
10389}
10390
10391
10392/**
10393 * Tries to determine what part of the guest-state VT-x has deemed invalid
10394 * and update error record fields accordingly.
10395 *
10396 * @return VMX_IGS_* return codes.
10397 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
10398 * wrong with the guest state.
10399 *
10400 * @param pVCpu The cross context virtual CPU structure.
10401 * @param pCtx Pointer to the guest-CPU state.
10402 *
10403 * @remarks This function assumes our cache of the VMCS controls
10404 *          is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
10405 */
10406static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx)
10407{
10408#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
10409#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
10410 uError = (err); \
10411 break; \
10412 } else do { } while (0)
10413
10414 int rc;
10415 PVM pVM = pVCpu->CTX_SUFF(pVM);
10416 uint32_t uError = VMX_IGS_ERROR;
10417 uint32_t u32Val;
10418 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
10419
10420 do
10421 {
10422 /*
10423 * CR0.
10424 */
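        /* fSetCr0 = bits the CPU requires to be 1 in CR0; fZapCr0 = bits that are allowed to be 1
           (anything outside it must be 0). Both are derived from the fixed CR0 capability MSRs. */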
10425 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10426 uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10427 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
10428 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
10429 if (fUnrestrictedGuest)
10430 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
10431
10432 uint32_t u32GuestCr0;
10433 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
10434 AssertRCBreak(rc);
10435 HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
10436 HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
10437 if ( !fUnrestrictedGuest
10438 && (u32GuestCr0 & X86_CR0_PG)
10439 && !(u32GuestCr0 & X86_CR0_PE))
10440 {
10441 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
10442 }
10443
10444 /*
10445 * CR4.
10446 */
10447 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10448 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10449
10450 uint32_t u32GuestCr4;
10451 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
10452 AssertRCBreak(rc);
10453 HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
10454 HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
10455
10456 /*
10457 * IA32_DEBUGCTL MSR.
10458 */
10459 uint64_t u64Val;
10460 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
10461 AssertRCBreak(rc);
10462 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
10463 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
10464 {
10465 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
10466 }
10467 uint64_t u64DebugCtlMsr = u64Val;
10468
10469#ifdef VBOX_STRICT
10470 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
10471 AssertRCBreak(rc);
10472 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
10473#endif
10474 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
10475
10476 /*
10477 * RIP and RFLAGS.
10478 */
10479 uint32_t u32Eflags;
10480#if HC_ARCH_BITS == 64
10481 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
10482 AssertRCBreak(rc);
10483 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
10484 if ( !fLongModeGuest
10485 || !pCtx->cs.Attr.n.u1Long)
10486 {
10487 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
10488 }
10489 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
10490 * must be identical if the "IA-32e mode guest" VM-entry
10491 * control is 1 and CS.L is 1. No check applies if the
10492 * CPU supports 64 linear-address bits. */
10493
10494 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
10495 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
10496 AssertRCBreak(rc);
10497 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
10498 VMX_IGS_RFLAGS_RESERVED);
10499 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10500 u32Eflags = u64Val;
10501#else
10502 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
10503 AssertRCBreak(rc);
10504 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
10505 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10506#endif
10507
10508 if ( fLongModeGuest
10509 || ( fUnrestrictedGuest
10510 && !(u32GuestCr0 & X86_CR0_PE)))
10511 {
10512 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
10513 }
10514
10515 uint32_t u32EntryInfo;
10516 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
10517 AssertRCBreak(rc);
10518 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
10519 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
10520 {
10521 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
10522 }
10523
10524 /*
10525 * 64-bit checks.
10526 */
10527#if HC_ARCH_BITS == 64
10528 if (fLongModeGuest)
10529 {
10530 HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
10531 HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
10532 }
10533
10534 if ( !fLongModeGuest
10535 && (u32GuestCr4 & X86_CR4_PCIDE))
10536 {
10537 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
10538 }
10539
10540 /** @todo CR3 field must be such that bits 63:52 and bits in the range
10541 * 51:32 beyond the processor's physical-address width are 0. */
10542
10543 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
10544 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
10545 {
10546 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
10547 }
10548
10549 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
10550 AssertRCBreak(rc);
10551 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
10552
10553 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
10554 AssertRCBreak(rc);
10555 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
10556#endif
10557
10558 /*
10559 * PERF_GLOBAL MSR.
10560 */
10561 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
10562 {
10563 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
10564 AssertRCBreak(rc);
10565 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
10566 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
10567 }
10568
10569 /*
10570 * PAT MSR.
10571 */
10572 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
10573 {
10574 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
10575 AssertRCBreak(rc);
10576 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
10577 for (unsigned i = 0; i < 8; i++)
10578 {
10579 uint8_t u8Val = (u64Val & 0xff);
10580 if ( u8Val != 0 /* UC */
10581 && u8Val != 1 /* WC */
10582 && u8Val != 4 /* WT */
10583 && u8Val != 5 /* WP */
10584 && u8Val != 6 /* WB */
10585 && u8Val != 7 /* UC- */)
10586 {
10587 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
10588 }
10589 u64Val >>= 8;
10590 }
10591 }
10592
10593 /*
10594 * EFER MSR.
10595 */
10596 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
10597 {
10598 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
10599 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
10600 AssertRCBreak(rc);
10601 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
10602 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
10603 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVCpu->hm.s.vmx.u32EntryCtls
10604 & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
10605 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
10606 HMVMX_CHECK_BREAK( fUnrestrictedGuest
10607 || !(u32GuestCr0 & X86_CR0_PG)
10608 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
10609 VMX_IGS_EFER_LMA_LME_MISMATCH);
10610 }
10611
10612 /*
10613 * Segment registers.
10614 */
10615 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10616 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
10617 if (!(u32Eflags & X86_EFL_VM))
10618 {
10619 /* CS */
10620 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
10621 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
10622 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
10623 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
10624 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10625 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
10626 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10627 /* CS cannot be loaded with NULL in protected mode. */
10628 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
10629 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
10630 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
10631 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
10632 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
10633 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
10634 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
10635 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
10636 else
10637 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
10638
10639 /* SS */
10640 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10641 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
10642 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
10643 if ( !(pCtx->cr0 & X86_CR0_PE)
10644 || pCtx->cs.Attr.n.u4Type == 3)
10645 {
10646 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
10647 }
10648 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
10649 {
10650 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
10651 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
10652 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
10653 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
10654 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
10655 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10656 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
10657 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10658 }
10659
10660        /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
10661 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
10662 {
10663 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
10664 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
10665 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10666 || pCtx->ds.Attr.n.u4Type > 11
10667 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10668 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
10669 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
10670 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
10671 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10672 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
10673 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10674 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10675 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
10676 }
10677 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
10678 {
10679 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
10680 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
10681 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10682 || pCtx->es.Attr.n.u4Type > 11
10683 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10684 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
10685 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
10686 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
10687 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10688 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
10689 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10690 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10691 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
10692 }
10693 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
10694 {
10695 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
10696 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
10697 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10698 || pCtx->fs.Attr.n.u4Type > 11
10699 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
10700 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
10701 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
10702 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
10703 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10704 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
10705 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10706 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10707 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
10708 }
10709 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
10710 {
10711 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
10712 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
10713 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10714 || pCtx->gs.Attr.n.u4Type > 11
10715 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
10716 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
10717 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
10718 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
10719 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10720 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
10721 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10722 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10723 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
10724 }
10725 /* 64-bit capable CPUs. */
10726#if HC_ARCH_BITS == 64
10727 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10728 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10729 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10730 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10731 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10732 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10733 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10734 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10735 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10736 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10737 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10738#endif
10739 }
10740 else
10741 {
10742 /* V86 mode checks. */
10743 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
10744 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10745 {
10746 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
10747 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
10748 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
10749 }
10750 else
10751 {
10752 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
10753 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
10754 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
10755 }
10756
10757 /* CS */
10758 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
10759 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
10760 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
10761 /* SS */
10762 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
10763 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
10764 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
10765 /* DS */
10766 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
10767 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
10768 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
10769 /* ES */
10770 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
10771 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
10772 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
10773 /* FS */
10774 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
10775 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
10776 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
10777 /* GS */
10778 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
10779 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
10780 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
10781 /* 64-bit capable CPUs. */
10782#if HC_ARCH_BITS == 64
10783 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10784 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10785 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10786 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10787 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10788 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10789 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10790 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10791 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10792 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10793 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10794#endif
10795 }
10796
10797 /*
10798 * TR.
10799 */
10800 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
10801 /* 64-bit capable CPUs. */
10802#if HC_ARCH_BITS == 64
10803 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
10804#endif
10805 if (fLongModeGuest)
10806 {
10807 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
10808 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
10809 }
10810 else
10811 {
10812 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
10813 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
10814 VMX_IGS_TR_ATTR_TYPE_INVALID);
10815 }
10816 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
10817 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
10818 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
10819 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
10820 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10821 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
10822 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10823 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
10824
10825 /*
10826 * GDTR and IDTR.
10827 */
10828#if HC_ARCH_BITS == 64
10829 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
10830 AssertRCBreak(rc);
10831 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
10832
10833 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
10834 AssertRCBreak(rc);
10835 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
10836#endif
10837
10838 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
10839 AssertRCBreak(rc);
10840 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10841
10842 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
10843 AssertRCBreak(rc);
10844 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10845
10846 /*
10847 * Guest Non-Register State.
10848 */
10849 /* Activity State. */
10850 uint32_t u32ActivityState;
10851 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
10852 AssertRCBreak(rc);
10853 HMVMX_CHECK_BREAK( !u32ActivityState
10854 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
10855 VMX_IGS_ACTIVITY_STATE_INVALID);
10856 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
10857 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
10858 uint32_t u32IntrState;
10859 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
10860 AssertRCBreak(rc);
10861 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
10862 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10863 {
10864 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
10865 }
10866
10867 /** @todo Activity state and injecting interrupts. Left as a todo since we
10868        *        currently don't use activity states other than ACTIVE. */
10869
10870 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
10871 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
10872
10873 /* Guest interruptibility-state. */
10874 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
10875 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
10876 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
10877 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
10878 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10879 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
10880 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
10881 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
10882 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
10883 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
10884 {
10885 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
10886 {
10887 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10888 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10889 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
10890 }
10891 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10892 {
10893 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10894 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
10895 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
10896 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
10897 }
10898 }
10899 /** @todo Assumes the processor is not in SMM. */
10900 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
10901 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
10902 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
10903 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
10904 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
10905 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
10906 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
10907 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10908 {
10909 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
10910 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
10911 }
10912
10913 /* Pending debug exceptions. */
10914#if HC_ARCH_BITS == 64
10915 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
10916 AssertRCBreak(rc);
10917 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
10918 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
10919 u32Val = u64Val; /* For pending debug exceptions checks below. */
10920#else
10921 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
10922 AssertRCBreak(rc);
10923 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
10924 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
10925#endif
10926
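        /* When interrupts are blocked by STI or MOV SS, or the guest is halted, the pending debug
           exception BS bit must be consistent with RFLAGS.TF and IA32_DEBUGCTL.BTF. */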
10927 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10928 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
10929 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
10930 {
10931 if ( (u32Eflags & X86_EFL_TF)
10932 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10933 {
10934 /* Bit 14 is PendingDebug.BS. */
10935 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
10936 }
10937 if ( !(u32Eflags & X86_EFL_TF)
10938 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10939 {
10940 /* Bit 14 is PendingDebug.BS. */
10941 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
10942 }
10943 }
10944
10945 /* VMCS link pointer. */
10946 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
10947 AssertRCBreak(rc);
10948 if (u64Val != UINT64_C(0xffffffffffffffff))
10949 {
10950 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
10951 /** @todo Bits beyond the processor's physical-address width MBZ. */
10952            /** @todo The 32-bit value located in memory referenced by the value of this field (as a
10953 * physical address) must contain the processor's VMCS revision ID. */
10954 /** @todo SMM checks. */
10955 }
10956
10957 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
10958 * not using Nested Paging? */
10959 if ( pVM->hm.s.fNestedPaging
10960 && !fLongModeGuest
10961 && CPUMIsGuestInPAEModeEx(pCtx))
10962 {
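            /* Verify the reserved (MBZ) bits in each of the four PAE PDPTEs from the VMCS. */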
10963 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
10964 AssertRCBreak(rc);
10965 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10966
10967 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
10968 AssertRCBreak(rc);
10969 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10970
10971 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
10972 AssertRCBreak(rc);
10973 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10974
10975 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
10976 AssertRCBreak(rc);
10977 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10978 }
10979
10980 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
10981 if (uError == VMX_IGS_ERROR)
10982 uError = VMX_IGS_REASON_NOT_FOUND;
10983 } while (0);
10984
10985 pVCpu->hm.s.u32HMError = uError;
10986 return uError;
10987
10988#undef HMVMX_ERROR_BREAK
10989#undef HMVMX_CHECK_BREAK
10990}
10991
10992/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10993/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
10994/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10995
10996/** @name VM-exit handlers.
10997 * @{
10998 */
10999
11000/**
11001 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
11002 */
11003HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11004{
11005 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11006 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
11007 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
11008 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
11009 return VINF_SUCCESS;
11010 return VINF_EM_RAW_INTERRUPT;
11011}
11012
11013
11014/**
11015 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
11016 */
11017HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11018{
11019 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11020 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
11021
11022 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11023 AssertRCReturn(rc, rc);
11024
11025 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
11026 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
11027 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
11028 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
11029
11030 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
11031 {
11032 /*
11033 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
11034 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
11035 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
11036 *
11037 * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
11038 * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State".
11039 */
11040 VMXDispatchHostNmi();
11041 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
11042 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11043 return VINF_SUCCESS;
11044 }
11045
11046 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11047 VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11048 if (RT_UNLIKELY(rcStrictRc1 == VINF_SUCCESS))
11049 { /* likely */ }
11050 else
11051 {
11052 if (rcStrictRc1 == VINF_HM_DOUBLE_FAULT)
11053 rcStrictRc1 = VINF_SUCCESS;
11054 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11055 return rcStrictRc1;
11056 }
11057
11058 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
11059 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
11060 switch (uIntType)
11061 {
11062 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
11063 Assert(uVector == X86_XCPT_DB);
11064 RT_FALL_THRU();
11065 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
11066 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
11067 RT_FALL_THRU();
11068 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
11069 {
11070 /*
11071 * If there's any exception caused as a result of event injection, the resulting
11072         * secondary/final exception will be pending and we shall continue guest execution
11073 * after injecting the event. The page-fault case is complicated and we manually
11074 * handle any currently pending event in hmR0VmxExitXcptPF.
11075 */
11076 if (!pVCpu->hm.s.Event.fPending)
11077 { /* likely */ }
11078 else if (uVector != X86_XCPT_PF)
11079 {
11080 rc = VINF_SUCCESS;
11081 break;
11082 }
11083
11084 switch (uVector)
11085 {
11086 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
11087 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
11088 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
11089 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
11090 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
11091 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient); break;
11092
11093 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11094 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11095 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
11096 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11097 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
11098 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11099 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
11100 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11101 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
11102 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11103 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
11104 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11105 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
11106 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11107 default:
11108 {
11109 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
11110 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11111 {
11112 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
11113 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
11114 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11115
11116 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
11117 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11118 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11119 AssertRCReturn(rc, rc);
11120 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
11121 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
11122 0 /* GCPtrFaultAddress */);
11123 }
11124 else
11125 {
11126 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
11127 pVCpu->hm.s.u32HMError = uVector;
11128 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
11129 }
11130 break;
11131 }
11132 }
11133 break;
11134 }
11135
11136 default:
11137 {
11138 pVCpu->hm.s.u32HMError = uExitIntInfo;
11139 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
11140 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
11141 break;
11142 }
11143 }
11144 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11145 return rc;
11146}
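
/*
 * Illustrative sketch, not part of the build: how the 32-bit VM-exit interruption-information
 * field consumed by the handler above is laid out (vector, type, error-code valid, valid bit).
 * The structure and helper below are hypothetical; the real code uses the
 * VMX_EXIT_INTERRUPTION_INFO_XXX macros for the same purpose.
 */
#if 0
typedef struct EXITINTINFOSKETCH
{
    uint8_t uVector;          /* Bits 7:0   - vector of the interrupt or exception. */
    uint8_t uType;            /* Bits 10:8  - 0=ext int, 2=NMI, 3=hw xcpt, 4=sw int, 5=priv sw xcpt, 6=sw xcpt. */
    bool    fErrorCodeValid;  /* Bit 11     - whether an error code was delivered. */
    bool    fNmiUnblockIret;  /* Bit 12     - NMI unblocking due to IRET. */
    bool    fValid;           /* Bit 31     - whether the field as a whole is valid. */
} EXITINTINFOSKETCH;

static EXITINTINFOSKETCH sketchDecodeExitIntInfo(uint32_t uExitIntInfo)
{
    EXITINTINFOSKETCH Info;
    Info.uVector         = (uint8_t)( uExitIntInfo        & 0xff);
    Info.uType           = (uint8_t)((uExitIntInfo >>  8) & 0x7);
    Info.fErrorCodeValid = RT_BOOL(uExitIntInfo & RT_BIT_32(11));
    Info.fNmiUnblockIret = RT_BOOL(uExitIntInfo & RT_BIT_32(12));
    Info.fValid          = RT_BOOL(uExitIntInfo & RT_BIT_32(31));
    return Info;
}
#endif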
11147
11148
11149/**
11150 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
11151 */
11152HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11153{
11154 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11155
11156     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
11157 hmR0VmxClearIntWindowExitVmcs(pVCpu);
11158
11159 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
11160 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
11161 return VINF_SUCCESS;
11162}
11163
11164
11165/**
11166 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
11167 */
11168HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11169{
11170 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11171 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
11172 {
11173 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
11174 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11175 }
11176
11177 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
11178
11179 /*
11180 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
11181 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
11182 */
11183 uint32_t fIntrState = 0;
11184 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState);
11185 AssertRCReturn(rc, rc);
11186
11187 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
11188 if ( fBlockSti
11189 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
11190 {
11191 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
11192 }
11193
11194     /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
11195 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
11196
11197 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
11198 return VINF_SUCCESS;
11199}
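
/*
 * Illustrative sketch, not part of the build: the guest interruptibility-state bits tested by
 * hmR0VmxExitNmiWindow() above. The helper is hypothetical; the real code uses the
 * VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_XXX constants.
 */
#if 0
static void sketchDecodeIntrState(uint32_t fIntrState)
{
    bool const fBlockSti   = RT_BOOL(fIntrState & RT_BIT_32(0)); /* Bit 0 - blocking by STI. */
    bool const fBlockMovSS = RT_BOOL(fIntrState & RT_BIT_32(1)); /* Bit 1 - blocking by MOV SS. */
    bool const fBlockSmi   = RT_BOOL(fIntrState & RT_BIT_32(2)); /* Bit 2 - blocking by SMI. */
    bool const fBlockNmi   = RT_BOOL(fIntrState & RT_BIT_32(3)); /* Bit 3 - blocking by NMI. */
    NOREF(fBlockSti); NOREF(fBlockMovSS); NOREF(fBlockSmi); NOREF(fBlockNmi);
}
#endif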
11200
11201
11202/**
11203 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
11204 */
11205HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11206{
11207 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11208 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11209}
11210
11211
11212/**
11213 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
11214 */
11215HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11216{
11217 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11218 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11219}
11220
11221
11222/**
11223 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
11224 */
11225HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11226{
11227 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11228 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
11229
11230 /*
11231 * Get the state we need and update the exit history entry.
11232 */
11233 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11234 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11235 AssertRCReturn(rc, rc);
11236
11237 VBOXSTRICTRC rcStrict;
11238 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
11239 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
11240 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
11241 if (!pExitRec)
11242 {
11243 /*
11244 * Regular CPUID instruction execution.
11245 */
11246 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbInstr);
11247 if (rcStrict == VINF_SUCCESS)
11248 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RAX
11249 | HM_CHANGED_GUEST_RCX | HM_CHANGED_GUEST_RDX | HM_CHANGED_GUEST_RBX);
11250 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11251 {
11252 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11253 rcStrict = VINF_SUCCESS;
11254 }
11255 }
11256 else
11257 {
11258 /*
11259 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
11260 */
11261 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
11262 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
11263 AssertRCReturn(rc2, rc2);
11264
11265 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
11266 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
11267
11268 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
11269 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
11270
11271 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
11272 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
11273 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
11274 }
11275 return rcStrict;
11276}
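
/*
 * Illustrative sketch, not part of the build: the exit-history lookup above is keyed on the
 * flat program counter of the exiting instruction, i.e. the CS segment base plus RIP. The
 * helper name is hypothetical; it simply restates the expression passed to
 * EMHistoryUpdateFlagsAndTypeAndPC().
 */
#if 0
static uint64_t sketchExitHistoryPc(uint64_t uCsBase, uint64_t uRip)
{
    return uCsBase + uRip;
}
#endif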
11277
11278
11279/**
11280 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
11281 */
11282HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11283{
11284 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11285 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4);
11286 AssertRCReturn(rc, rc);
11287
11288 if (pMixedCtx->cr4 & X86_CR4_SMXE)
11289 return VINF_EM_RAW_EMULATE_INSTR;
11290
11291 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
11292 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11293}
11294
11295
11296/**
11297 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
11298 */
11299HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11300{
11301 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11302 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
11303 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11304 AssertRCReturn(rc, rc);
11305
11306 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr);
11307 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11308 {
11309 /* If we get a spurious VM-exit when offsetting is enabled,
11310 we must reset offsetting on VM-reentry. See @bugref{6634}. */
11311 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
11312 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11313 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11314 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
11315 }
11316 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11317 {
11318 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11319 rcStrict = VINF_SUCCESS;
11320 }
11321 return rcStrict;
11322}
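
/*
 * Illustrative sketch, not part of the build: with the "use TSC offsetting" processor-based
 * control active, RDTSC in the guest returns the host TSC plus the VMCS TSC-offset field,
 * which is why a spurious exit above forces the offset to be recomputed before re-entry.
 * The helper below is hypothetical and only shows how such an offset would be derived.
 */
#if 0
static uint64_t sketchCalcTscOffset(uint64_t uHostTsc, uint64_t uDesiredGuestTsc)
{
    /* Guest observes: uGuestTsc = uHostTsc + offset  =>  offset = uDesiredGuestTsc - uHostTsc. */
    return uDesiredGuestTsc - uHostTsc;
}
#endif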
11323
11324
11325/**
11326 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
11327 */
11328HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11329{
11330 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11331 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
11332 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11333 AssertRCReturn(rc, rc);
11334
11335 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr);
11336 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11337 {
11338 /* If we get a spurious VM-exit when offsetting is enabled,
11339 we must reset offsetting on VM-reentry. See @bugref{6634}. */
11340 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
11341 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11342 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11343 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX | HM_CHANGED_GUEST_RCX);
11344 }
11345 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11346 {
11347 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11348 rcStrict = VINF_SUCCESS;
11349 }
11350 return rcStrict;
11351}
11352
11353
11354/**
11355 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
11356 */
11357HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11358{
11359 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11360 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
11361 AssertRCReturn(rc, rc);
11362
11363 PVM pVM = pVCpu->CTX_SUFF(pVM);
11364 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11365 if (RT_LIKELY(rc == VINF_SUCCESS))
11366 {
11367 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11368 Assert(pVmxTransient->cbInstr == 2);
11369 }
11370 else
11371 {
11372 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
11373 rc = VERR_EM_INTERPRETER;
11374 }
11375 return rc;
11376}
11377
11378
11379/**
11380 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
11381 */
11382HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11383{
11384 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11385
11386 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
11387 if (EMAreHypercallInstructionsEnabled(pVCpu))
11388 {
11389 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS
11390 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
11391 AssertRCReturn(rc, rc);
11392
11393 /* Perform the hypercall. */
11394 rcStrict = GIMHypercall(pVCpu, pMixedCtx);
11395 if (rcStrict == VINF_SUCCESS)
11396 {
11397 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11398 AssertRCReturn(rc, rc);
11399 }
11400 else
11401 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
11402 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
11403 || RT_FAILURE(rcStrict));
11404
11405         /* If the hypercall changes anything other than the guest's general-purpose registers,
11406            we would need to reload the changed guest bits here before VM-entry. */
11407 }
11408 else
11409 Log4Func(("Hypercalls not enabled\n"));
11410
11411 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
11412 if (RT_FAILURE(rcStrict))
11413 {
11414 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
11415 rcStrict = VINF_SUCCESS;
11416 }
11417
11418 return rcStrict;
11419}
11420
11421
11422/**
11423 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
11424 */
11425HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11426{
11427 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11428 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
11429
11430 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11431 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11432 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11433 AssertRCReturn(rc, rc);
11434
11435 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQualification);
11436
11437 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
11438 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11439 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11440 {
11441 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11442 rcStrict = VINF_SUCCESS;
11443 }
11444 else
11445         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n",
11446 pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
11447 return rcStrict;
11448}
11449
11450
11451/**
11452 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
11453 */
11454HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11455{
11456 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11457 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
11458 AssertRCReturn(rc, rc);
11459
11460 PVM pVM = pVCpu->CTX_SUFF(pVM);
11461 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11462 if (RT_LIKELY(rc == VINF_SUCCESS))
11463 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11464 else
11465 {
11466 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
11467 rc = VERR_EM_INTERPRETER;
11468 }
11469 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
11470 return rc;
11471}
11472
11473
11474/**
11475 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
11476 */
11477HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11478{
11479 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11480 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
11481 AssertRCReturn(rc, rc);
11482
11483 PVM pVM = pVCpu->CTX_SUFF(pVM);
11484 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11485 rc = VBOXSTRICTRC_VAL(rc2);
11486 if (RT_LIKELY( rc == VINF_SUCCESS
11487 || rc == VINF_EM_HALT))
11488 {
11489 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11490 AssertRCReturn(rc3, rc3);
11491
11492 if ( rc == VINF_EM_HALT
11493 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
11494 rc = VINF_SUCCESS;
11495 }
11496 else
11497 {
11498 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
11499 rc = VERR_EM_INTERPRETER;
11500 }
11501 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
11502 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
11503 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
11504 return rc;
11505}
11506
11507
11508/**
11509 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
11510 */
11511HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11512{
11513 /*
11514 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root
11515 * mode. In theory, we should never get this VM-exit. This can happen only if dual-monitor
11516 * treatment of SMI and VMX is enabled, which can (only?) be done by executing VMCALL in
11517 * VMX root operation. If we get here, something funny is going on.
11518 *
11519 * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment".
11520 */
11521 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11522 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11523 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11524}
11525
11526
11527/**
11528 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
11529 */
11530HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11531{
11532 /*
11533 * This can only happen if we support dual-monitor treatment of SMI, which can be activated
11534 * by executing VMCALL in VMX root operation. Only an STM (SMM transfer monitor) would get
11535 * this VM-exit when we (the executive monitor) execute a VMCALL in VMX root mode or receive
11536 * an SMI. If we get here, something funny is going on.
11537 *
11538 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
11539 * See Intel spec. 25.3 "Other Causes of VM-Exits"
11540 */
11541 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11542 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11543 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11544}
11545
11546
11547/**
11548 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
11549 */
11550HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11551{
11552 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
11553 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11554 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11555 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11556}
11557
11558
11559/**
11560 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
11561 */
11562HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11563{
11564 /*
11565 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used.
11566 * We don't make use of it as our guests don't have direct access to the host LAPIC.
11567 * See Intel spec. 25.3 "Other Causes of VM-exits".
11568 */
11569 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11570 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11571 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11572}
11573
11574
11575/**
11576 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
11577 * VM-exit.
11578 */
11579HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11580{
11581 /*
11582 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
11583      * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
11584 *
11585 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
11586      * See Intel spec. 23.8 "Restrictions on VMX Operation".
11587 */
11588 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11589 return VINF_SUCCESS;
11590}
11591
11592
11593/**
11594 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
11595 * VM-exit.
11596 */
11597HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11598{
11599 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11600 return VINF_EM_RESET;
11601}
11602
11603
11604/**
11605 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
11606 */
11607HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11608{
11609 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11610 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
11611
11612 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11613 AssertRCReturn(rc, rc);
11614
11615 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
11616 rc = VINF_SUCCESS;
11617 else
11618 rc = VINF_EM_HALT;
11619
11620 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11621 if (rc != VINF_SUCCESS)
11622 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
11623 return rc;
11624}
11625
11626
11627/**
11628 * VM-exit handler for instructions that result in a \#UD exception delivered to
11629 * the guest.
11630 */
11631HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11632{
11633 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11634 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
11635 return VINF_SUCCESS;
11636}
11637
11638
11639/**
11640 * VM-exit handler for expiry of the VMX preemption timer.
11641 */
11642HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11643{
11644 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11645
11646 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
11647 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11648
11649     /* If there are any timer events pending, fall back to ring-3; otherwise resume guest execution. */
11650 PVM pVM = pVCpu->CTX_SUFF(pVM);
11651 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
11652 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
11653 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
11654}
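
/*
 * Illustrative sketch, not part of the build: the VMX-preemption timer counts down at the TSC
 * rate shifted right by the value in IA32_VMX_MISC[4:0]. A hypothetical helper converting a
 * TSC delta until the next timer deadline into a preemption-timer value would therefore look
 * roughly like this; the real calculation lives in the TSC-offsetting/preempt-timer update code.
 */
#if 0
static uint32_t sketchTscDeltaToPreemptTimer(uint64_t cTscTicksToDeadline, uint8_t cVmxMiscShift)
{
    uint64_t const uTimer = cTscTicksToDeadline >> cVmxMiscShift;
    return uTimer > UINT32_MAX ? UINT32_MAX : (uint32_t)uTimer; /* The VMCS field is 32 bits wide. */
}
#endif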
11655
11656
11657/**
11658 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
11659 */
11660HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11661{
11662 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11663
11664 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11665 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
11666 AssertRCReturn(rc, rc);
11667
11668 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
11669 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11670 : HM_CHANGED_XCPT_RAISED_MASK);
11671
11672 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
11673
11674 return rcStrict;
11675}
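
/*
 * Illustrative sketch, not part of the build: XSETBV loads XCR[ECX] from EDX:EAX (only XCR0 is
 * currently defined). The hypothetical helper below restates the fLoadSaveGuestXcr0 decision
 * made above: guest XCR0 only needs to be swapped in around VM-entry when the guest has
 * CR4.OSXSAVE set and its XCR0 differs from the host value.
 */
#if 0
static bool sketchNeedGuestXcr0Swap(uint64_t uGuestCr4, uint64_t uGuestXcr0, uint64_t uHostXcr0)
{
    return (uGuestCr4 & X86_CR4_OSXSAVE) && uGuestXcr0 != uHostXcr0;
}
#endif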
11676
11677
11678/**
11679 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
11680 */
11681HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11682{
11683 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11684 /** @todo Use VM-exit instruction information. */
11685 return VERR_EM_INTERPRETER;
11686}
11687
11688
11689/**
11690 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
11691 * Error VM-exit.
11692 */
11693HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11694{
11695 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
11696 AssertRCReturn(rc, rc);
11697 rc = hmR0VmxCheckVmcsCtls(pVCpu);
11698 if (RT_FAILURE(rc))
11699 return rc;
11700
11701 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
11702 NOREF(uInvalidReason);
11703
11704#ifdef VBOX_STRICT
11705 uint32_t fIntrState;
11706 RTHCUINTREG uHCReg;
11707 uint64_t u64Val;
11708 uint32_t u32Val;
11709
11710 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
11711 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
11712 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
11713 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState);
11714 AssertRCReturn(rc, rc);
11715
11716 Log4(("uInvalidReason %u\n", uInvalidReason));
11717 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
11718 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
11719 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
11720 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", fIntrState));
11721
11722 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
11723 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
11724 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
11725 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
11726 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
11727 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
11728 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
11729 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
11730 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
11731 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
11732 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
11733 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
11734
11735 hmR0DumpRegs(pVCpu, pMixedCtx);
11736#else
11737 NOREF(pVmxTransient);
11738#endif
11739
11740 return VERR_VMX_INVALID_GUEST_STATE;
11741}
11742
11743
11744/**
11745 * VM-exit handler for VM-entry failure due to an MSR-load
11746 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
11747 */
11748HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11749{
11750 NOREF(pVmxTransient);
11751 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
11752 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11753}
11754
11755
11756/**
11757 * VM-exit handler for VM-entry failure due to a machine-check event
11758 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
11759 */
11760HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11761{
11762 NOREF(pVmxTransient);
11763 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
11764 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11765}
11766
11767
11768/**
11769 * VM-exit handler for all undefined reasons. Should never ever happen... in
11770 * theory.
11771 */
11772HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11773{
11774 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
11775 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
11776 return VERR_VMX_UNDEFINED_EXIT_CODE;
11777}
11778
11779
11780/**
11781 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
11782 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
11783 * Conditional VM-exit.
11784 */
11785HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11786{
11787 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11788
11789 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
11790 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
11791 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
11792 return VERR_EM_INTERPRETER;
11793 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11794 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11795}
11796
11797
11798/**
11799 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
11800 */
11801HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11802{
11803 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11804
11805 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
11806 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
11807 return VERR_EM_INTERPRETER;
11808 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11809 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11810}
11811
11812
11813/**
11814 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
11815 */
11816HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11817{
11818 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11819
11820     /** @todo Optimize this: We currently drag in the whole MSR state
11821      * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
11822      * MSRs required. That would require changes to IEM and possibly CPUM too.
11823      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
11824 uint32_t const idMsr = pMixedCtx->ecx; NOREF(idMsr); /* Save it. */
11825 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11826 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
11827 AssertRCReturn(rc, rc);
11828
11829 Log4Func(("ecx=%#RX32\n", idMsr));
11830
11831#ifdef VBOX_STRICT
11832 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
11833 {
11834 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr)
11835 && idMsr != MSR_K6_EFER)
11836 {
11837 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
11838 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11839 }
11840 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
11841 {
11842 VMXMSREXITREAD enmRead;
11843 VMXMSREXITWRITE enmWrite;
11844 int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
11845 AssertRCReturn(rc2, rc2);
11846 if (enmRead == VMXMSREXIT_PASSTHRU_READ)
11847 {
11848 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
11849 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11850 }
11851 }
11852 }
11853#endif
11854
11855 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbInstr);
11856 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
11857 if (rcStrict == VINF_SUCCESS)
11858 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11859 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
11860 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11861 {
11862 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11863 rcStrict = VINF_SUCCESS;
11864 }
11865 else
11866 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11867
11868 return rcStrict;
11869}
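
/*
 * Illustrative sketch, not part of the build: the 4 KB MSR bitmap consulted for the VBOX_STRICT
 * checks above. The read bitmaps for MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff occupy
 * the first two kilobytes, the corresponding write bitmaps the last two; a set bit means the
 * access causes a VM-exit. The helper is hypothetical; the real code uses hmR0VmxGetMsrPermission().
 */
#if 0
static bool sketchMsrCausesExit(uint8_t const *pbMsrBitmap, uint32_t idMsr, bool fWrite)
{
    uint32_t offBitmap = fWrite ? 2048 : 0;                 /* Write bitmaps start at byte 2048. */
    if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBitmap += 1024;                                  /* High-MSR bitmaps follow the low-MSR ones. */
        idMsr     -= UINT32_C(0xc0000000);
    }
    else if (idMsr > UINT32_C(0x1fff))
        return true;                                        /* MSRs outside both ranges always cause VM-exits. */
    return RT_BOOL(pbMsrBitmap[offBitmap + idMsr / 8] & (1 << (idMsr % 8)));
}
#endif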
11870
11871
11872/**
11873 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
11874 */
11875HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11876{
11877 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11878
11879     /** @todo Optimize this: We currently drag in the whole MSR state
11880      * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
11881      * MSRs required. That would require changes to IEM and possibly CPUM too.
11882      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
11883 uint32_t const idMsr = pMixedCtx->ecx; /* Save it. */
11884 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11885 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
11886 AssertRCReturn(rc, rc);
11887
11888 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pMixedCtx->edx, pMixedCtx->eax));
11889
11890 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
11891 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
11892
11893 if (rcStrict == VINF_SUCCESS)
11894 {
11895 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11896
11897 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
11898 if ( idMsr == MSR_IA32_APICBASE
11899 || ( idMsr >= MSR_IA32_X2APIC_START
11900 && idMsr <= MSR_IA32_X2APIC_END))
11901 {
11902 /*
11903              * We've already saved the APIC-related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
11904              * virtualization is implemented, we'll have to make sure the APIC state is saved from the VMCS before IEM changes it.
11905 */
11906 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
11907 }
11908 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
11909 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11910 else if (idMsr == MSR_K6_EFER)
11911 {
11912 /*
11913              * If the guest touches EFER, we need to update the VM-entry and VM-exit controls as well,
11914              * even if it is -not- touching bits that cause paging-mode changes (LMA/LME). We also care
11915              * about the other bits, SCE and NXE. See @bugref{7368}.
11916 */
11917 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS
11918 | HM_CHANGED_VMX_EXIT_CTLS);
11919 }
11920
11921 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
11922 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
11923 {
11924 switch (idMsr)
11925 {
11926 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
11927 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
11928 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
11929 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
11930 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
11931 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
11932 default:
11933 {
11934 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
11935 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
11936 else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
11937 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
11938 break;
11939 }
11940 }
11941 }
11942#ifdef VBOX_STRICT
11943 else
11944 {
11945 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
11946 switch (idMsr)
11947 {
11948 case MSR_IA32_SYSENTER_CS:
11949 case MSR_IA32_SYSENTER_EIP:
11950 case MSR_IA32_SYSENTER_ESP:
11951 case MSR_K8_FS_BASE:
11952 case MSR_K8_GS_BASE:
11953 {
11954 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
11955 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11956 }
11957
11958 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
11959 default:
11960 {
11961 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
11962 {
11963 /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */
11964 if (idMsr != MSR_K6_EFER)
11965 {
11966 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
11967 idMsr));
11968 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11969 }
11970 }
11971
11972 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
11973 {
11974 VMXMSREXITREAD enmRead;
11975 VMXMSREXITWRITE enmWrite;
11976 int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
11977 AssertRCReturn(rc2, rc2);
11978 if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
11979 {
11980 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
11981 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11982 }
11983 }
11984 break;
11985 }
11986 }
11987 }
11988#endif /* VBOX_STRICT */
11989 }
11990 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11991 {
11992 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11993 rcStrict = VINF_SUCCESS;
11994 }
11995 else
11996 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11997
11998 return rcStrict;
11999}
12000
12001
12002/**
12003 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
12004 */
12005HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12006{
12007 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12008 /** @todo The guest has likely hit a contended spinlock. We might want to
12009      *        poke and schedule a different guest VCPU. */
12010 return VINF_EM_RAW_INTERRUPT;
12011}
12012
12013
12014/**
12015 * VM-exit handler for when the TPR value is lowered below the specified
12016 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
12017 */
12018HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12019{
12020 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12021 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
12022
12023 /*
12024 * The TPR shadow would've been synced with the APIC TPR in hmR0VmxPostRunGuest(). We'll re-evaluate
12025 * pending interrupts and inject them before the next VM-entry so we can just continue execution here.
12026 */
12027 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
12028 return VINF_SUCCESS;
12029}
12030
12031
12032/**
12033 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
12034 * VM-exit.
12035 *
12036 * @retval VINF_SUCCESS when guest execution can continue.
12037 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
12038 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
12039 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
12040 * interpreter.
12041 */
12042HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12043{
12044 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12045 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
12046
12047 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12048 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12049 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12050 AssertRCReturn(rc, rc);
12051
12052 VBOXSTRICTRC rcStrict;
12053 PVM pVM = pVCpu->CTX_SUFF(pVM);
12054 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
12055 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
12056 switch (uAccessType)
12057 {
12058 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: /* MOV to CRx */
12059 {
12060 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
12061 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
12062 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification));
12063 AssertMsg( rcStrict == VINF_SUCCESS
12064 || rcStrict == VINF_IEM_RAISED_XCPT
12065 || rcStrict == VINF_PGM_CHANGE_MODE
12066 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12067
12068 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
12069 {
12070 case 0:
12071 {
12072 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12073 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12074 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
12075 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
12076 break;
12077 }
12078
12079 case 2:
12080 {
12081 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
12082                     /* Nothing to do here; CR2 is not part of the VMCS. */
12083 break;
12084 }
12085
12086 case 3:
12087 {
12088 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop);
12089 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
12090 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12091 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
12092 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
12093 break;
12094 }
12095
12096 case 4:
12097 {
12098 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
12099 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12100 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
12101 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
12102 pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
12103 break;
12104 }
12105
12106 case 8:
12107 {
12108 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
12109 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
12110 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12111 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
12112 break;
12113 }
12114 default:
12115 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)));
12116 break;
12117 }
12118 break;
12119 }
12120
12121 case VMX_EXIT_QUAL_CRX_ACCESS_READ: /* MOV from CRx */
12122 {
12123 Assert( !pVM->hm.s.fNestedPaging
12124 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
12125 || pVCpu->hm.s.fUsingDebugLoop
12126 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
12127 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
12128 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8
12129 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
12130
12131 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
12132 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification),
12133 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification));
12134 AssertMsg( rcStrict == VINF_SUCCESS
12135 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12136#ifdef VBOX_WITH_STATISTICS
12137 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
12138 {
12139 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
12140 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
12141 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
12142 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
12143 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
12144 }
12145#endif
12146 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
12147 VBOXSTRICTRC_VAL(rcStrict)));
12148 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
12149 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
12150 else
12151 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12152 break;
12153 }
12154
12155 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
12156 {
12157 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
12158 AssertMsg( rcStrict == VINF_SUCCESS
12159 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12160
12161 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12162 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
12163 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12164 break;
12165 }
12166
12167 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
12168 {
12169 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
12170 VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification));
12171 AssertMsg( rcStrict == VINF_SUCCESS
12172 || rcStrict == VINF_IEM_RAISED_XCPT
12173 || rcStrict == VINF_PGM_CHANGE_MODE,
12174 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12175
12176 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12177 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
12178 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12179 break;
12180 }
12181
12182 default:
12183 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
12184 VERR_VMX_UNEXPECTED_EXCEPTION);
12185 }
12186
12187 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
12188 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
12189 if (rcStrict == VINF_IEM_RAISED_XCPT)
12190 {
12191 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
12192 rcStrict = VINF_SUCCESS;
12193 }
12194
12195 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
12196 NOREF(pVM);
12197 return rcStrict;
12198}
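
/*
 * Illustrative sketch, not part of the build: the exit-qualification bits decoded by the MOV CRx
 * handler above through the VMX_EXIT_QUAL_CRX_XXX macros (control-register number, access type,
 * general-purpose register and LMSW source data). The helper is hypothetical.
 */
#if 0
static void sketchDecodeCrxQual(uint64_t uExitQual)
{
    uint8_t  const iCrReg    = (uint8_t)( uExitQual        & 0xf); /* Bits 3:0   - control register number. */
    uint8_t  const uAccess   = (uint8_t)((uExitQual >>  4) & 0x3); /* Bits 5:4   - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW. */
    uint8_t  const iGReg     = (uint8_t)((uExitQual >>  8) & 0xf); /* Bits 11:8  - general-purpose register for MOV CRx. */
    uint16_t const uLmswData = (uint16_t)(uExitQual >> 16);        /* Bits 31:16 - LMSW source data. */
    NOREF(iCrReg); NOREF(uAccess); NOREF(iGReg); NOREF(uLmswData);
}
#endif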
12199
12200
12201/**
12202 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
12203 * VM-exit.
12204 */
12205HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12206{
12207 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12208 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
12209 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
12210
12211 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12212 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12213 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER);
12214     /* EFER is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
12215 AssertRCReturn(rc, rc);
12216
12217     /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
12218 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification);
12219 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification);
12220 bool fIOWrite = ( VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification)
12221 == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
12222 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
12223 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
12224 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
12225 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
12226
12227 /*
12228 * Update exit history to see if this exit can be optimized.
12229 */
12230 VBOXSTRICTRC rcStrict;
12231 PCEMEXITREC pExitRec = NULL;
12232 if ( !fGstStepping
12233 && !fDbgStepping)
12234 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12235 !fIOString
12236 ? !fIOWrite
12237 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
12238 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
12239 : !fIOWrite
12240 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
12241 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
12242 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
12243 if (!pExitRec)
12244 {
12245 /* I/O operation lookup arrays. */
12246 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
12247 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
12248 uint32_t const cbValue = s_aIOSizes[uIOWidth];
12249 uint32_t const cbInstr = pVmxTransient->cbInstr;
12250 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
12251 PVM pVM = pVCpu->CTX_SUFF(pVM);
12252 if (fIOString)
12253 {
12254 /*
12255 * INS/OUTS - I/O String instruction.
12256 *
12257 * Use instruction-information if available, otherwise fall back on
12258 * interpreting the instruction.
12259 */
12260 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
12261 fIOWrite ? 'w' : 'r'));
12262 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
12263 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
12264 {
12265 int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
12266 AssertRCReturn(rc2, rc2);
12267 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
12268 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
12269 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
12270 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification);
12271 if (fIOWrite)
12272 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
12273 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
12274 else
12275 {
12276 /*
12277 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
12278                  * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
12279 * See Intel Instruction spec. for "INS".
12280 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
12281 */
12282 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
12283 }
12284 }
12285 else
12286 rcStrict = IEMExecOne(pVCpu);
12287
12288 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12289 fUpdateRipAlready = true;
12290 }
12291 else
12292 {
12293 /*
12294 * IN/OUT - I/O instruction.
12295 */
12296 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
12297 fIOWrite ? 'w' : 'r'));
12298 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
12299 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification));
12300 if (fIOWrite)
12301 {
12302 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
12303 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
12304 }
12305 else
12306 {
12307 uint32_t u32Result = 0;
12308 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
12309 if (IOM_SUCCESS(rcStrict))
12310 {
12311 /* Save result of I/O IN instr. in AL/AX/EAX. */
12312 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
12313 }
12314 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
12315 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
12316 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
12317 }
12318 }
12319
12320 if (IOM_SUCCESS(rcStrict))
12321 {
12322 if (!fUpdateRipAlready)
12323 {
12324 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, cbInstr);
12325 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12326 }
12327
12328 /*
12329              * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
12330              * while booting a Fedora 17 64-bit guest.
12331 *
12332 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
12333 */
12334 if (fIOString)
12335 {
12336 /** @todo Single-step for INS/OUTS with REP prefix? */
12337 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
12338 }
12339 else if ( !fDbgStepping
12340 && fGstStepping)
12341 {
12342 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
12343 AssertRCReturn(rc, rc);
12344 }
12345
12346 /*
12347 * If any I/O breakpoints are armed, we need to check if one triggered
12348 * and take appropriate action.
12349 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
12350 */
12351 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7);
12352 AssertRCReturn(rc, rc);
12353
12354 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
12355 * execution engines about whether hyper BPs and such are pending. */
12356 uint32_t const uDr7 = pMixedCtx->dr[7];
12357 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
12358 && X86_DR7_ANY_RW_IO(uDr7)
12359 && (pMixedCtx->cr4 & X86_CR4_DE))
12360 || DBGFBpIsHwIoArmed(pVM)))
12361 {
12362 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
12363
12364             /* We're playing with the host CPU state here; make sure we don't preempt or longjmp. */
12365 VMMRZCallRing3Disable(pVCpu);
12366 HM_DISABLE_PREEMPT();
12367
12368 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
12369
12370 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
12371 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
12372 {
12373 /* Raise #DB. */
12374 if (fIsGuestDbgActive)
12375 ASMSetDR6(pMixedCtx->dr[6]);
12376 if (pMixedCtx->dr[7] != uDr7)
12377 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
12378
12379 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
12380 }
12381             /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
12382                however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as a backup. */
12383 else if ( rcStrict2 != VINF_SUCCESS
12384 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
12385 rcStrict = rcStrict2;
12386 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
12387
12388 HM_RESTORE_PREEMPT();
12389 VMMRZCallRing3Enable(pVCpu);
12390 }
12391 }
12392
12393#ifdef VBOX_STRICT
12394 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
12395 Assert(!fIOWrite);
12396 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE)
12397 Assert(fIOWrite);
12398 else
12399 {
12400# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
12401 * statuses, that the VMM device and some others may return. See
12402 * IOM_SUCCESS() for guidance. */
12403 AssertMsg( RT_FAILURE(rcStrict)
12404 || rcStrict == VINF_SUCCESS
12405 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
12406 || rcStrict == VINF_EM_DBG_BREAKPOINT
12407 || rcStrict == VINF_EM_RAW_GUEST_TRAP
12408 || rcStrict == VINF_EM_RAW_TO_R3
12409 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12410# endif
12411 }
12412#endif
12413 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
12414 }
12415 else
12416 {
12417 /*
12418 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
12419 */
12420 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
12421 AssertRCReturn(rc2, rc2);
12422 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
12423 : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
12424 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
12425 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12426 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
12427 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
12428
12429 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12430 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
12431
12432 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12433 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12434 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12435 }
12436 return rcStrict;
12437}
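
/*
 * Illustrative sketch, not part of the build: the I/O exit-qualification bits decoded at the top
 * of hmR0VmxExitIoInstr() through the VMX_EXIT_QUAL_IO_XXX macros (see Intel spec. 27-5
 * "Exit Qualifications for I/O Instructions"). The helper is hypothetical.
 */
#if 0
static void sketchDecodeIoQual(uint64_t uExitQual)
{
    uint8_t  const uIOWidth = (uint8_t)(uExitQual & 0x7);        /* Bits 2:0   - access size minus one (0=byte, 1=word, 3=dword). */
    bool     const fIn      = RT_BOOL(uExitQual & RT_BIT_32(3)); /* Bit 3      - direction: 0=OUT, 1=IN. */
    bool     const fString  = RT_BOOL(uExitQual & RT_BIT_32(4)); /* Bit 4      - INS/OUTS string instruction. */
    bool     const fRep     = RT_BOOL(uExitQual & RT_BIT_32(5)); /* Bit 5      - REP prefixed. */
    uint16_t const uIOPort  = (uint16_t)(uExitQual >> 16);       /* Bits 31:16 - port number. */
    NOREF(uIOWidth); NOREF(fIn); NOREF(fString); NOREF(fRep); NOREF(uIOPort);
}
#endif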
12438
12439
12440/**
12441 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
12442 * VM-exit.
12443 */
12444HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12445{
12446 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12447
12448     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
12449 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12450 AssertRCReturn(rc, rc);
12451 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
12452 {
12453 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
12454 AssertRCReturn(rc, rc);
12455 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
12456 {
12457 uint32_t uErrCode;
12458 RTGCUINTPTR GCPtrFaultAddress;
12459 uint32_t const uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
12460 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
12461 bool const fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
12462 if (fErrorCodeValid)
12463 {
12464 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
12465 AssertRCReturn(rc, rc);
12466 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
12467 }
12468 else
12469 uErrCode = 0;
12470
12471 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
12472 && uVector == X86_XCPT_PF)
12473 GCPtrFaultAddress = pMixedCtx->cr2;
12474 else
12475 GCPtrFaultAddress = 0;
12476
12477 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
12478 0 /* cbInstr */, uErrCode, GCPtrFaultAddress);
12479
12480 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
12481 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12482 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12483 }
12484 }
12485
12486 /* Fall back to the interpreter to emulate the task-switch. */
12487 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12488 return VERR_EM_INTERPRETER;
12489}
12490
12491
12492/**
12493 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
12494 */
12495HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12496{
12497 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12498 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
12499 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
12500 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
12501 AssertRCReturn(rc, rc);
12502 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
12503 return VINF_EM_DBG_STEPPED;
12504}
12505
12506
12507/**
12508 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
12509 */
12510HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12511{
12512 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12513
12514 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
12515
12516 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12517 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12518 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12519 {
12520         /* For some crazy guests, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
12521 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12522 {
12523 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
12524 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12525 }
12526 }
12527 else
12528 {
12529 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12530 rcStrict1 = VINF_SUCCESS;
12531 return rcStrict1;
12532 }
12533
12534    /* IOMMMIOPhysHandler() below may call into IEM, import the necessary state. */
12535 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12536 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12537 AssertRCReturn(rc, rc);
12538
12539    /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
12540 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
12541 VBOXSTRICTRC rcStrict2;
12542 switch (uAccessType)
12543 {
12544 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
12545 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
12546 {
12547 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
12548 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
12549 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
12550
12551 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase; /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
12552 GCPhys &= PAGE_BASE_GC_MASK;
12553 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
12554 PVM pVM = pVCpu->CTX_SUFF(pVM);
12555 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
12556 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
12557
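            /* Hand the access on to IOM as a physical access to the APIC MMIO page; the APIC device/MMIO handler
               emulates it. */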
12558 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
12559 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
12560 CPUMCTX2CORE(pMixedCtx), GCPhys);
12561 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
12562 if ( rcStrict2 == VINF_SUCCESS
12563 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12564 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12565 {
12566 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
12567 | HM_CHANGED_GUEST_APIC_TPR);
12568 rcStrict2 = VINF_SUCCESS;
12569 }
12570 break;
12571 }
12572
12573 default:
12574 Log4Func(("uAccessType=%#x\n", uAccessType));
12575 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR;
12576 break;
12577 }
12578
12579 if (rcStrict2 != VINF_SUCCESS)
12580 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
12581 return rcStrict2;
12582}
12583
12584
12585/**
12586 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
12587 * VM-exit.
12588 */
12589HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12590{
12591 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12592
12593 /* We should -not- get this VM-exit if the guest's debug registers were active. */
12594 if (pVmxTransient->fWasGuestDebugStateActive)
12595 {
12596 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
12597 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12598 }
12599
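    /* Neither the hypervisor nor single-stepping needs the debug registers: give them to the guest and stop
       intercepting MOV DRx so further accesses run without VM-exits. */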
12600 if ( !pVCpu->hm.s.fSingleInstruction
12601 && !pVmxTransient->fWasHyperDebugStateActive)
12602 {
12603 Assert(!DBGFIsStepping(pVCpu));
12604 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
12605
12606 /* Don't intercept MOV DRx any more. */
12607 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
12608 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
12609 AssertRCReturn(rc, rc);
12610
12611 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
12612 VMMRZCallRing3Disable(pVCpu);
12613 HM_DISABLE_PREEMPT();
12614
12615 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
12616 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
12617 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
12618
12619 HM_RESTORE_PREEMPT();
12620 VMMRZCallRing3Enable(pVCpu);
12621
12622#ifdef VBOX_WITH_STATISTICS
12623 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12624 AssertRCReturn(rc, rc);
12625 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12626 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12627 else
12628 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12629#endif
12630 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
12631 return VINF_SUCCESS;
12632 }
12633
12634 /*
12635     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER and CS; EFER is always up-to-date.
12636     * Update the segment registers and DR7 from the CPU.
12637 */
12638 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12639 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
12640 AssertRCReturn(rc, rc);
12641 Log4Func(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
12642
12643 PVM pVM = pVCpu->CTX_SUFF(pVM);
12644 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12645 {
12646 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
12647 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
12648 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
12649 if (RT_SUCCESS(rc))
12650 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
12651 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12652 }
12653 else
12654 {
12655 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
12656 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
12657 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
12658 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12659 }
12660
12661 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
12662 if (RT_SUCCESS(rc))
12663 {
12664 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
12665 AssertRCReturn(rc2, rc2);
12666 return VINF_SUCCESS;
12667 }
12668 return rc;
12669}
12670
12671
12672/**
12673 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
12674 * Conditional VM-exit.
12675 */
12676HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12677{
12678 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12679 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
12680
12681 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12682 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12683 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12684 {
12685        /* If event delivery caused this EPT misconfig (MMIO) VM-exit, go back to instruction emulation; otherwise
12686           re-injecting the original pending event would most likely cause the same EPT misconfig VM-exit again. */
12687 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12688 {
12689 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
12690 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12691 }
12692 }
12693 else
12694 {
12695 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12696 rcStrict1 = VINF_SUCCESS;
12697 return rcStrict1;
12698 }
12699
12700 /*
12701     * Get sufficient state and update the exit history entry.
12702 */
12703 RTGCPHYS GCPhys;
12704 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
12705 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12706 AssertRCReturn(rc, rc);
12707
12708 VBOXSTRICTRC rcStrict;
12709 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12710 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
12711 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
12712 if (!pExitRec)
12713 {
12714 /*
12715 * If we succeed, resume guest execution.
12716     * If interpreting the instruction fails because we couldn't get the guest physical address of the page
12717     * containing the instruction via the guest's page tables (we would invalidate the guest page in the
12718     * host TLB), resume execution anyway; this causes a guest page fault which lets the guest handle this
12719     * weird case. See @bugref{6043}.
12720 */
12721 PVM pVM = pVCpu->CTX_SUFF(pVM);
12722 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
12723 Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pMixedCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
12724 if ( rcStrict == VINF_SUCCESS
12725 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
12726 || rcStrict == VERR_PAGE_NOT_PRESENT)
12727 {
12728 /* Successfully handled MMIO operation. */
12729 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
12730 | HM_CHANGED_GUEST_APIC_TPR);
12731 rcStrict = VINF_SUCCESS;
12732 }
12733 }
12734 else
12735 {
12736 /*
12737 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
12738 */
12739 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
12740 int rc2 = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12741 AssertRCReturn(rc2, rc2);
12742
12743 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
12744 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
12745
12746 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12747 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
12748
12749 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12750 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12751 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12752 }
12753 return VBOXSTRICTRC_TODO(rcStrict);
12754}
12755
12756
12757/**
12758 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
12759 * VM-exit.
12760 */
12761HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12762{
12763 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12764 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
12765
12766 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12767 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12768 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12769 {
12770 /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. */
12771 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12772 Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));
12773 }
12774 else
12775 {
12776 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12777 rcStrict1 = VINF_SUCCESS;
12778 return rcStrict1;
12779 }
12780
12781 RTGCPHYS GCPhys;
12782 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
12783 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12784 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12785 AssertRCReturn(rc, rc);
12786
12787 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
12788 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
12789
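    /* Translate the EPT-violation exit qualification bits into a #PF-style error code for TRPM/PGM. */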
12790 RTGCUINT uErrorCode = 0;
12791 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
12792 uErrorCode |= X86_TRAP_PF_ID;
12793 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE)
12794 uErrorCode |= X86_TRAP_PF_RW;
12795 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
12796 uErrorCode |= X86_TRAP_PF_P;
12797
12798 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
12799
12800 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
12801 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
12802
12803 /* Handle the pagefault trap for the nested shadow table. */
12804 PVM pVM = pVCpu->CTX_SUFF(pVM);
12805 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
12806 TRPMResetTrap(pVCpu);
12807
12808 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
12809 if ( rcStrict2 == VINF_SUCCESS
12810 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12811 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12812 {
12813 /* Successfully synced our nested page tables. */
12814 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
12815 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
12816 return VINF_SUCCESS;
12817 }
12818
12819 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
12820 return rcStrict2;
12821}
12822
12823/** @} */
12824
12825/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12826/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
12827/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12828
12829/** @name VM-exit exception handlers.
12830 * @{
12831 */
12832
12833/**
12834 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
12835 */
12836static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12837{
12838 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12839 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
12840
12841 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
12842 AssertRCReturn(rc, rc);
12843
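    /* With CR0.NE clear the guest uses the legacy external FPU error reporting path (FERR# asserting IRQ 13
       via the interrupt controller) instead of receiving #MF directly. */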
12844 if (!(pMixedCtx->cr0 & X86_CR0_NE))
12845 {
12846 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
12847 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
12848
12849 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
12850         * provides the VM-exit instruction length. If this causes problems later,
12851 * disassemble the instruction like it's done on AMD-V. */
12852 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
12853 AssertRCReturn(rc2, rc2);
12854 return rc;
12855 }
12856
12857 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12858 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12859 return rc;
12860}
12861
12862
12863/**
12864 * VM-exit exception handler for \#BP (Breakpoint exception).
12865 */
12866static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12867{
12868 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12869 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
12870
12871 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
12872 AssertRCReturn(rc, rc);
12873
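    /* Let DBGF have first pick; VINF_EM_RAW_GUEST_TRAP means the breakpoint is not ours and must be reflected
       back to the guest below. */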
12874 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx));
12875 if (rc == VINF_EM_RAW_GUEST_TRAP)
12876 {
12877 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12878 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12879 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12880 AssertRCReturn(rc, rc);
12881
12882 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12883 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12884 }
12885
12886 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
12887 return rc;
12888}
12889
12890
12891/**
12892 * VM-exit exception handler for \#AC (Alignment-check exception).
12893 */
12894static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12895{
12896 RT_NOREF_PV(pMixedCtx);
12897 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12898
12899 /*
12900 * Re-inject it. We'll detect any nesting before getting here.
12901 */
12902 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12903 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12904 AssertRCReturn(rc, rc);
12905 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
12906
12907 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12908 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12909 return VINF_SUCCESS;
12910}
12911
12912
12913/**
12914 * VM-exit exception handler for \#DB (Debug exception).
12915 */
12916static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12917{
12918 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12919 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
12920
12921 /*
12922     * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
12923 * for processing.
12924 */
12925 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12926
12927 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
12928 uint64_t uDR6 = X86_DR6_INIT_VAL;
12929 uDR6 |= ( pVmxTransient->uExitQualification
12930 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
12931
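    /* Let DBGF process the #DB first; VINF_EM_RAW_GUEST_TRAP means it belongs to the guest and must be
       reflected, any other status indicates a hypervisor debug event (stepping or breakpoint). */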
12932 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
12933 Log6Func(("rc=%Rrc\n", rc));
12934 if (rc == VINF_EM_RAW_GUEST_TRAP)
12935 {
12936 /*
12937 * The exception was for the guest. Update DR6, DR7.GD and
12938 * IA32_DEBUGCTL.LBR before forwarding it.
12939 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
12940 */
12941 VMMRZCallRing3Disable(pVCpu);
12942 HM_DISABLE_PREEMPT();
12943
12944 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
12945 pMixedCtx->dr[6] |= uDR6;
12946 if (CPUMIsGuestDebugStateActive(pVCpu))
12947 ASMSetDR6(pMixedCtx->dr[6]);
12948
12949 HM_RESTORE_PREEMPT();
12950 VMMRZCallRing3Enable(pVCpu);
12951
12952 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7);
12953 AssertRCReturn(rc, rc);
12954
12955 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
12956 pMixedCtx->dr[7] &= ~X86_DR7_GD;
12957
12958 /* Paranoia. */
12959 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
12960 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
12961
12962 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
12963 AssertRCReturn(rc, rc);
12964
12965 /*
12966 * Raise #DB in the guest.
12967 *
12968 * It is important to reflect exactly what the VM-exit gave us (preserving the
12969 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
12970 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
12971 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
12972 *
12973         * Intel re-documented ICEBP/INT1 in May 2018 (it was previously only documented as part of
12974         * the Intel 386); see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
12975 */
12976 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12977 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12978 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12979 AssertRCReturn(rc, rc);
12980 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12981 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12982 return VINF_SUCCESS;
12983 }
12984
12985 /*
12986     * Not a guest trap; it must be a hypervisor-related debug event then.
12987 * Update DR6 in case someone is interested in it.
12988 */
12989 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
12990 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
12991 CPUMSetHyperDR6(pVCpu, uDR6);
12992
12993 return rc;
12994}
12995
12996/**
12997 * VM-exit exception handler for \#GP (General-protection exception).
12998 *
12999 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
13000 */
13001static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13002{
13003 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13004 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
13005
13006 int rc;
13007 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
13008 { /* likely */ }
13009 else
13010 {
13011#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
13012 Assert(pVCpu->hm.s.fUsingDebugLoop);
13013#endif
13014 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
13015 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13016 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13017 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13018 rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13019 AssertRCReturn(rc, rc);
13020 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
13021 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
13022 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13023 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13024 return rc;
13025 }
13026
13027 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
13028 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
13029
13030    /* EMInterpretDisasCurrent() requires a lot of the state; import the entire guest state. */
13031 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13032 AssertRCReturn(rc, rc);
13033
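    /* Real-mode-on-V86 emulation: without unrestricted guest execution, real-mode guest code runs in
       virtual-8086 mode where IOPL-sensitive instructions (CLI, STI, PUSHF, POPF, INT n, IRET) can raise #GP,
       so disassemble and emulate them here. */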
13034 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
13035 uint32_t cbOp = 0;
13036 PVM pVM = pVCpu->CTX_SUFF(pVM);
13037 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
13038 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
13039 if (RT_SUCCESS(rc))
13040 {
13041 rc = VINF_SUCCESS;
13042 Assert(cbOp == pDis->cbInstr);
13043 Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
13044 switch (pDis->pCurInstr->uOpcode)
13045 {
13046 case OP_CLI:
13047 {
13048 pMixedCtx->eflags.Bits.u1IF = 0;
13049 pMixedCtx->eflags.Bits.u1RF = 0;
13050 pMixedCtx->rip += pDis->cbInstr;
13051 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13052 if ( !fDbgStepping
13053 && pMixedCtx->eflags.Bits.u1TF)
13054 {
13055 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13056 AssertRCReturn(rc, rc);
13057 }
13058 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
13059 break;
13060 }
13061
13062 case OP_STI:
13063 {
13064 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
13065 pMixedCtx->eflags.Bits.u1IF = 1;
13066 pMixedCtx->eflags.Bits.u1RF = 0;
13067 pMixedCtx->rip += pDis->cbInstr;
13068 if (!fOldIF)
13069 {
13070 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
13071 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13072 }
13073 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13074 if ( !fDbgStepping
13075 && pMixedCtx->eflags.Bits.u1TF)
13076 {
13077 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13078 AssertRCReturn(rc, rc);
13079 }
13080 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
13081 break;
13082 }
13083
13084 case OP_HLT:
13085 {
13086 rc = VINF_EM_HALT;
13087 pMixedCtx->rip += pDis->cbInstr;
13088 pMixedCtx->eflags.Bits.u1RF = 0;
13089 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13090 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
13091 break;
13092 }
13093
13094 case OP_POPF:
13095 {
13096 Log4Func(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
13097 uint32_t cbParm;
13098 uint32_t uMask;
13099 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
13100 if (pDis->fPrefix & DISPREFIX_OPSIZE)
13101 {
13102 cbParm = 4;
13103 uMask = 0xffffffff;
13104 }
13105 else
13106 {
13107 cbParm = 2;
13108 uMask = 0xffff;
13109 }
13110
13111                /* Get the stack pointer & read the saved EFLAGS image off the stack into Eflags. */
13112 RTGCPTR GCPtrStack = 0;
13113 X86EFLAGS Eflags;
13114 Eflags.u32 = 0;
13115 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
13116 &GCPtrStack);
13117 if (RT_SUCCESS(rc))
13118 {
13119 Assert(sizeof(Eflags.u32) >= cbParm);
13120 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm, PGMACCESSORIGIN_HM));
13121 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
13122 }
13123 if (RT_FAILURE(rc))
13124 {
13125 rc = VERR_EM_INTERPRETER;
13126 break;
13127 }
13128 Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
13129 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
13130 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
13131 pMixedCtx->esp += cbParm;
13132 pMixedCtx->esp &= uMask;
13133 pMixedCtx->rip += pDis->cbInstr;
13134 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
13135                /* Generate a pending-debug exception when the guest is stepping over POPF, regardless of how
13136                   POPF restores EFLAGS.TF. */
13137 if ( !fDbgStepping
13138 && fGstStepping)
13139 {
13140 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13141 AssertRCReturn(rc, rc);
13142 }
13143 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
13144 break;
13145 }
13146
13147 case OP_PUSHF:
13148 {
13149 uint32_t cbParm;
13150 uint32_t uMask;
13151 if (pDis->fPrefix & DISPREFIX_OPSIZE)
13152 {
13153 cbParm = 4;
13154 uMask = 0xffffffff;
13155 }
13156 else
13157 {
13158 cbParm = 2;
13159 uMask = 0xffff;
13160 }
13161
13162 /* Get the stack pointer & push the contents of eflags onto the stack. */
13163 RTGCPTR GCPtrStack = 0;
13164 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
13165 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
13166 if (RT_FAILURE(rc))
13167 {
13168 rc = VERR_EM_INTERPRETER;
13169 break;
13170 }
13171 X86EFLAGS Eflags = pMixedCtx->eflags;
13172                /* The RF & VM bits are cleared in the image stored on the stack; see the Intel instruction reference for PUSHF. */
13173 Eflags.Bits.u1RF = 0;
13174 Eflags.Bits.u1VM = 0;
13175
13176 rc = VBOXSTRICTRC_TODO(PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm, PGMACCESSORIGIN_HM));
13177 if (RT_UNLIKELY(rc != VINF_SUCCESS))
13178 {
13179 AssertMsgFailed(("%Rrc\n", rc)); /** @todo allow strict return codes here */
13180 rc = VERR_EM_INTERPRETER;
13181 break;
13182 }
13183 Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
13184 pMixedCtx->esp -= cbParm;
13185 pMixedCtx->esp &= uMask;
13186 pMixedCtx->rip += pDis->cbInstr;
13187 pMixedCtx->eflags.Bits.u1RF = 0;
13188 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
13189 if ( !fDbgStepping
13190 && pMixedCtx->eflags.Bits.u1TF)
13191 {
13192 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13193 AssertRCReturn(rc, rc);
13194 }
13195 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
13196 break;
13197 }
13198
13199 case OP_IRET:
13200 {
13201 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
13202 * instruction reference. */
13203 RTGCPTR GCPtrStack = 0;
13204 uint32_t uMask = 0xffff;
13205 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
13206 uint16_t aIretFrame[3];
13207 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
13208 {
13209 rc = VERR_EM_INTERPRETER;
13210 break;
13211 }
13212 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
13213 &GCPtrStack);
13214 if (RT_SUCCESS(rc))
13215 {
13216 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame),
13217 PGMACCESSORIGIN_HM));
13218 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
13219 }
13220 if (RT_FAILURE(rc))
13221 {
13222 rc = VERR_EM_INTERPRETER;
13223 break;
13224 }
13225 pMixedCtx->eip = 0;
13226 pMixedCtx->ip = aIretFrame[0];
13227 pMixedCtx->cs.Sel = aIretFrame[1];
13228 pMixedCtx->cs.ValidSel = aIretFrame[1];
13229 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
13230 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
13231 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
13232 pMixedCtx->sp += sizeof(aIretFrame);
13233 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
13234 | HM_CHANGED_GUEST_CS);
13235 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
13236 if ( !fDbgStepping
13237 && fGstStepping)
13238 {
13239 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13240 AssertRCReturn(rc, rc);
13241 }
13242 Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
13243 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
13244 break;
13245 }
13246
13247 case OP_INT:
13248 {
13249 uint16_t uVector = pDis->Param1.uValue & 0xff;
13250 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
13251 /* INT clears EFLAGS.TF, we must not set any pending debug exceptions here. */
13252 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
13253 break;
13254 }
13255
13256 case OP_INTO:
13257 {
13258 if (pMixedCtx->eflags.Bits.u1OF)
13259 {
13260 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
13261 /* INTO clears EFLAGS.TF, we must not set any pending debug exceptions here. */
13262 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
13263 }
13264 else
13265 {
13266 pMixedCtx->eflags.Bits.u1RF = 0;
13267 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
13268 }
13269 break;
13270 }
13271
13272 default:
13273 {
13274 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
13275 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
13276 EMCODETYPE_SUPERVISOR);
13277 rc = VBOXSTRICTRC_VAL(rc2);
13278 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13279 /** @todo We have to set pending-debug exceptions here when the guest is
13280 * single-stepping depending on the instruction that was interpreted. */
13281 Log4Func(("#GP rc=%Rrc\n", rc));
13282 break;
13283 }
13284 }
13285 }
13286 else
13287 rc = VERR_EM_INTERPRETER;
13288
13289 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
13290 ("#GP Unexpected rc=%Rrc\n", rc));
13291 return rc;
13292}
13293
13294
13295/**
13296 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
13297 * the exception reported in the VMX transient structure back into the VM.
13298 *
13299 * @remarks Requires uExitIntInfo in the VMX transient structure to be
13300 * up-to-date.
13301 */
13302static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13303{
13304 RT_NOREF_PV(pMixedCtx);
13305 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13306#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
13307 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active,
13308 ("uVector=%#x u32XcptBitmap=%#X32\n",
13309 VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.u32XcptBitmap));
13310#endif
13311
13312 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
13313 hmR0VmxCheckExitDueToEventDelivery(). */
13314 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13315 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13316 AssertRCReturn(rc, rc);
13317 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
13318
13319#ifdef DEBUG_ramshankar
13320 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
13321 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
13322    Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pMixedCtx->cs.Sel, pMixedCtx->rip));
13323#endif
13324
13325 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13326 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13327 return VINF_SUCCESS;
13328}
13329
13330
13331/**
13332 * VM-exit exception handler for \#PF (Page-fault exception).
13333 */
13334static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13335{
13336 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13337 PVM pVM = pVCpu->CTX_SUFF(pVM);
13338 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
13339 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13340 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13341 AssertRCReturn(rc, rc);
13342
13343 if (!pVM->hm.s.fNestedPaging)
13344 { /* likely */ }
13345 else
13346 {
13347#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
13348 Assert(pVCpu->hm.s.fUsingDebugLoop);
13349#endif
13350 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
13351 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
13352 {
13353 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13354 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
13355 }
13356 else
13357 {
13358 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13359 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
13360 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
13361 }
13362 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13363 return rc;
13364 }
13365
13366    /* If it's a vectoring #PF, emulate the injection of the original event as PGMTrap0eHandler() is incapable
13367       of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
13368 if (pVmxTransient->fVectoringPF)
13369 {
13370 Assert(pVCpu->hm.s.Event.fPending);
13371 return VINF_EM_RAW_INJECT_TRPM_EVENT;
13372 }
13373
13374 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13375 AssertRCReturn(rc, rc);
13376
13377 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
13378 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
13379
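    /* Hand the #PF to PGM: it either resolves it (shadow page-table sync, MMIO access, etc.) or returns
       VINF_EM_RAW_GUEST_TRAP when it is a genuine guest page fault that must be reflected. */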
13380 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
13381 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
13382 (RTGCPTR)pVmxTransient->uExitQualification);
13383
13384 Log4Func(("#PF: rc=%Rrc\n", rc));
13385 if (rc == VINF_SUCCESS)
13386 {
13387 /*
13388         * This is typically a shadow page table sync or an MMIO instruction. But we may have
13389 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
13390 */
13391 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13392 TRPMResetTrap(pVCpu);
13393 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
13394 return rc;
13395 }
13396
13397 if (rc == VINF_EM_RAW_GUEST_TRAP)
13398 {
13399 if (!pVmxTransient->fVectoringDoublePF)
13400 {
13401 /* It's a guest page fault and needs to be reflected to the guest. */
13402 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
13403 TRPMResetTrap(pVCpu);
13404 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
13405 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13406 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
13407 }
13408 else
13409 {
13410 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13411 TRPMResetTrap(pVCpu);
13412 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
13413 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
13414 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
13415 }
13416
13417 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13418 return VINF_SUCCESS;
13419 }
13420
13421 TRPMResetTrap(pVCpu);
13422 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
13423 return rc;
13424}
13425
13426/** @} */
13427