VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@72897

Last change on this file was r72897, checked in by vboxsync on 2018-07-04

hmR0VmxExitInvlpg: build fix

1/* $Id: HMVMXR0.cpp 72897 2018-07-04 17:19:14Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/x86.h>
25#include <iprt/asm-amd64-x86.h>
26#include <iprt/thread.h>
27
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/gim.h>
35#include <VBox/vmm/apic.h>
36#ifdef VBOX_WITH_REM
37# include <VBox/vmm/rem.h>
38#endif
39#include "HMInternal.h"
40#include <VBox/vmm/vm.h>
41#include "HMVMXR0.h"
42#include "dtrace/VBoxVMM.h"
43
44#ifdef DEBUG_ramshankar
45# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
46# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
47# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
48# define HMVMX_ALWAYS_CHECK_GUEST_STATE
49# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
50# define HMVMX_ALWAYS_TRAP_PF
51# define HMVMX_ALWAYS_FLUSH_TLB
52# define HMVMX_ALWAYS_SWAP_EFER
53#endif
54
55
56/*********************************************************************************************************************************
57* Defined Constants And Macros *
58*********************************************************************************************************************************/
59/** Use the function table. */
60#define HMVMX_USE_FUNCTION_TABLE
61
62/** Determine which tagged-TLB flush handler to use. */
63#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
64#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
65#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
66#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
67
68/** @name HMVMX_READ_XXX
69 * Flags to skip redundant reads of some common VMCS fields that are not part of
70 * the guest-CPU or VCPU state but are needed while handling VM-exits.
71 */
72#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
73#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
74#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
75#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
76#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
77#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
78#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
79/** @} */
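80/*
80 * Editorial note: a minimal sketch (not part of the original file) of the
80 * read-once pattern these HMVMX_READ_XXX flags enable; the same pattern is
80 * used verbatim by the hmR0VmxReadXxxVmcs helpers further down.
80 */
80#if 0
80    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
80    {
80        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
80        AssertRCReturn(rc, rc);
80        pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
80    }
80#endif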
80
81/**
82 * States of the VMCS.
83 *
84 * This does not reflect all possible VMCS states but currently only those
85 * needed for maintaining the VMCS consistently even when thread-context hooks
86 * are used. Maybe later this can be extended (e.g. for nested virtualization).
87 */
88#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
89#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
90#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
91
92/**
93 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
94 * guest using hardware-assisted VMX.
95 *
96 * This excludes state like GPRs (other than RSP) which are always
97 * swapped and restored across the world-switch, and also registers like the
98 * EFER MSR which cannot be modified by the guest without causing a VM-exit.
99 */
100#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
101 | CPUMCTX_EXTRN_RFLAGS \
102 | CPUMCTX_EXTRN_RSP \
103 | CPUMCTX_EXTRN_SREG_MASK \
104 | CPUMCTX_EXTRN_TABLE_MASK \
105 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
106 | CPUMCTX_EXTRN_SYSCALL_MSRS \
107 | CPUMCTX_EXTRN_SYSENTER_MSRS \
108 | CPUMCTX_EXTRN_TSC_AUX \
109 | CPUMCTX_EXTRN_OTHER_MSRS \
110 | CPUMCTX_EXTRN_CR0 \
111 | CPUMCTX_EXTRN_CR3 \
112 | CPUMCTX_EXTRN_CR4 \
113 | CPUMCTX_EXTRN_DR7 \
114 | CPUMCTX_EXTRN_HM_VMX_MASK)
115
116/**
117 * Exception bitmap mask for real-mode guests (real-on-v86).
118 *
119 * We need to intercept all exceptions manually, except:
120 * - \#AC and \#DB, which are always intercepted (not only in real-mode) to
121 * prevent the CPU from deadlocking due to bugs in Intel CPUs.
122 * - \#PF, which need not be intercepted even in real-mode if we have Nested
123 * Paging support.
124 */
125#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
126 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
127 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
128 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
129 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
130 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
131 | RT_BIT(X86_XCPT_XF))
132
133/** Maximum VM-instruction error number. */
134#define HMVMX_INSTR_ERROR_MAX 28
135
136/** Profiling macro. */
137#ifdef HM_PROFILE_EXIT_DISPATCH
138# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
139# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
140#else
141# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
142# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
143#endif
144
145/** Assert that preemption is disabled or covered by thread-context hooks. */
146#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
147 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
148
149/** Assert that we haven't migrated CPUs when thread-context hooks are not
150 * used. */
151#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHookIsEnabled(pVCpu) \
152 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
153 ("Illegal migration! Entered on CPU %u Current %u\n", \
154 pVCpu->hm.s.idEnteredCpu, RTMpCpuId()))
155
156/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
157 * context. */
158#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
159 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
160 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
161
162/** Helper macro for VM-exit handlers called unexpectedly. */
163#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_pVmxTransient) \
164 do { \
165 (a_pVCpu)->hm.s.u32HMError = (a_pVmxTransient)->uExitReason; \
166 return VERR_VMX_UNEXPECTED_EXIT; \
167 } while (0)
168
169/** Macro for importing segment registers from the VMCS into the guest-CPU context. */
170#ifdef VMX_USE_CACHED_VMCS_ACCESSES
171# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
172 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
173 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
174#else
175# define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \
176 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
177 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
178#endif
179
180/** Macro for exporting segment registers to the VMCS from the guest-CPU context. */
181# define HMVMX_EXPORT_SREG(Sel, a_pCtxSelReg) \
182 hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
183 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))
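/*
 * Editorial note: an illustrative usage sketch (not part of the original file)
 * of the segment-register macros above; pCtx stands in for the guest-CPU
 * context pointer used by the caller.
 */
#if 0
    int rc = HMVMX_EXPORT_SREG(CS, &pCtx->cs);    /* Writes guest CS into the VMCS. */
    AssertRCReturn(rc, rc);
    rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs);        /* Reads guest CS back from the VMCS. */
    AssertRCReturn(rc, rc);
#endif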
184
185
186/*********************************************************************************************************************************
187* Structures and Typedefs *
188*********************************************************************************************************************************/
189/**
190 * VMX transient state.
191 *
192 * A state structure for holding miscellaneous information across
193 * VMX non-root operation and restored after the transition.
194 */
195typedef struct VMXTRANSIENT
196{
197 /** The host's rflags/eflags. */
198 RTCCUINTREG fEFlags;
199#if HC_ARCH_BITS == 32
200 uint32_t u32Alignment0;
201#endif
202 /** The guest's TPR value used for TPR shadowing. */
203 uint8_t u8GuestTpr;
204 /** Alignment. */
205 uint8_t abAlignment0[7];
206
207 /** The basic VM-exit reason. */
208 uint16_t uExitReason;
209 /** Alignment. */
210 uint16_t u16Alignment0;
211 /** The VM-exit interruption error code. */
212 uint32_t uExitIntErrorCode;
214 /** The VM-exit qualification. */
214 uint64_t uExitQualification;
215
216 /** The VM-exit interruption-information field. */
217 uint32_t uExitIntInfo;
218 /** The VM-exit instruction-length field. */
219 uint32_t cbInstr;
220 /** The VM-exit instruction-information field. */
221 union
222 {
223 /** Plain unsigned int representation. */
224 uint32_t u;
225 /** INS and OUTS information. */
226 struct
227 {
228 uint32_t u7Reserved0 : 7;
229 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
230 uint32_t u3AddrSize : 3;
231 uint32_t u5Reserved1 : 5;
232 /** The segment register (X86_SREG_XXX). */
233 uint32_t iSegReg : 3;
234 uint32_t uReserved2 : 14;
235 } StrIo;
236 /** INVEPT, INVVPID, INVPCID information. */
237 struct
238 {
239 /** Scaling; 0=no scaling, 1=scale-by-2, 2=scale-by-4, 3=scale-by-8. */
240 uint32_t u2Scaling : 2;
241 uint32_t u5Reserved0 : 5;
242 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
243 uint32_t u3AddrSize : 3;
244 uint32_t u1Reserved0 : 1;
245 uint32_t u4Reserved0 : 4;
246 /** The segment register (X86_SREG_XXX). */
247 uint32_t iSegReg : 3;
248 /** The index register (X86_GREG_XXX). */
249 uint32_t iIdxReg : 4;
250 /** Set if index register is invalid. */
251 uint32_t fIdxRegValid : 1;
252 /** The base register (X86_GREG_XXX). */
253 uint32_t iBaseReg : 4;
254 /** Set if base register is invalid. */
255 uint32_t fBaseRegValid : 1;
256 /** Register 2 (X86_GREG_XXX). */
257 uint32_t iReg2 : 4;
258 } Inv;
259 } ExitInstrInfo;
260 /** Whether the VM-entry failed or not. */
261 bool fVMEntryFailed;
262 /** Alignment. */
263 uint8_t abAlignment1[3];
264
265 /** The VM-entry interruption-information field. */
266 uint32_t uEntryIntInfo;
267 /** The VM-entry exception error code field. */
268 uint32_t uEntryXcptErrorCode;
269 /** The VM-entry instruction length field. */
270 uint32_t cbEntryInstr;
271
272 /** IDT-vectoring information field. */
273 uint32_t uIdtVectoringInfo;
274 /** IDT-vectoring error code. */
275 uint32_t uIdtVectoringErrorCode;
276
277 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
278 uint32_t fVmcsFieldsRead;
279
280 /** Whether the guest debug state was active at the time of VM-exit. */
281 bool fWasGuestDebugStateActive;
282 /** Whether the hyper debug state was active at the time of VM-exit. */
283 bool fWasHyperDebugStateActive;
284 /** Whether TSC-offsetting should be setup before VM-entry. */
285 bool fUpdateTscOffsettingAndPreemptTimer;
286 /** Whether the VM-exit was caused by a page-fault during delivery of a
287 * contributory exception or a page-fault. */
288 bool fVectoringDoublePF;
289 /** Whether the VM-exit was caused by a page-fault during delivery of an
290 * external interrupt or NMI. */
291 bool fVectoringPF;
292} VMXTRANSIENT;
293AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
294AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
295AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
296AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
297AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
298/** Pointer to VMX transient state. */
299typedef VMXTRANSIENT *PVMXTRANSIENT;
300
301
302/**
303 * MSR-bitmap read permissions.
304 */
305typedef enum VMXMSREXITREAD
306{
307 /** Reading this MSR causes a VM-exit. */
308 VMXMSREXIT_INTERCEPT_READ = 0xb,
309 /** Reading this MSR does not cause a VM-exit. */
310 VMXMSREXIT_PASSTHRU_READ
311} VMXMSREXITREAD;
312/** Pointer to MSR-bitmap read permissions. */
313typedef VMXMSREXITREAD* PVMXMSREXITREAD;
314
315/**
316 * MSR-bitmap write permissions.
317 */
318typedef enum VMXMSREXITWRITE
319{
320 /** Writing to this MSR causes a VM-exit. */
321 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
322 /** Writing to this MSR does not cause a VM-exit. */
323 VMXMSREXIT_PASSTHRU_WRITE
324} VMXMSREXITWRITE;
325/** Pointer to MSR-bitmap write permissions. */
326typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
327
328
329/**
330 * VMX VM-exit handler.
331 *
332 * @returns Strict VBox status code (i.e. informational status codes too).
333 * @param pVCpu The cross context virtual CPU structure.
334 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
335 * out-of-sync. Make sure to update the required
336 * fields before using them.
337 * @param pVmxTransient Pointer to the VMX-transient structure.
338 */
339#ifndef HMVMX_USE_FUNCTION_TABLE
340typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
341#else
342typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
343/** Pointer to VM-exit handler. */
344typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
345#endif
346
347/**
348 * VMX VM-exit handler, non-strict status code.
349 *
350 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
351 *
352 * @returns VBox status code, no informational status code returned.
353 * @param pVCpu The cross context virtual CPU structure.
354 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
355 * out-of-sync. Make sure to update the required
356 * fields before using them.
357 * @param pVmxTransient Pointer to the VMX-transient structure.
358 *
359 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
360 * use of that status code will be replaced with VINF_EM_SOMETHING
361 * later when switching over to IEM.
362 */
363#ifndef HMVMX_USE_FUNCTION_TABLE
364typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
365#else
366typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
367#endif
368
369
370/*********************************************************************************************************************************
371* Internal Functions *
372*********************************************************************************************************************************/
373static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush);
374static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr);
375static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu);
376static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat);
377static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
378 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState);
379#if HC_ARCH_BITS == 32
380static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
381#endif
382#ifndef HMVMX_USE_FUNCTION_TABLE
383DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
384# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
385# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
386#else
387# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
388# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
389#endif
390
391
392/** @name VM-exit handlers.
393 * @{
394 */
395static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
396static FNVMXEXITHANDLER hmR0VmxExitExtInt;
397static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
398static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal;
399static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi;
400static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi;
401static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi;
402static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
403static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
404static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
405static FNVMXEXITHANDLER hmR0VmxExitCpuid;
406static FNVMXEXITHANDLER hmR0VmxExitGetsec;
407static FNVMXEXITHANDLER hmR0VmxExitHlt;
408static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
409static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
410static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
411static FNVMXEXITHANDLER hmR0VmxExitVmcall;
412static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
413static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
414static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
415static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
416static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
417static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
418static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
419static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
420static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
421static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad;
422static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUndefined;
423static FNVMXEXITHANDLER hmR0VmxExitMwait;
424static FNVMXEXITHANDLER hmR0VmxExitMtf;
425static FNVMXEXITHANDLER hmR0VmxExitMonitor;
426static FNVMXEXITHANDLER hmR0VmxExitPause;
427static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck;
428static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
429static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
430static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
432static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
433static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
434static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
435static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
436static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
437static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
438static FNVMXEXITHANDLER hmR0VmxExitRdrand;
439static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
440/** @} */
441
442static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
443static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
444static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
445static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
446static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
447static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
448static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
449static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx);
450
451
452/*********************************************************************************************************************************
453* Global Variables *
454*********************************************************************************************************************************/
455#ifdef HMVMX_USE_FUNCTION_TABLE
456
457/**
458 * VMX_EXIT dispatch table.
459 */
460static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
461{
462 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
463 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
464 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
465 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
466 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
467 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
468 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
469 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
470 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
471 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
472 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
473 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
474 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
475 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
476 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
477 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
478 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
479 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
480 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
481 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
482 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
483 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
484 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
485 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
486 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
487 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
488 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
489 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
490 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
491 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
492 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
493 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
494 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
495 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
496 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
497 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
498 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
499 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
500 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
501 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
502 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
503 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
504 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
505 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
506 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
507 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
508 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
509 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
510 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
511 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
512 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
513 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
514 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
515 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
516 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
517 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
518 /* 56 VMX_EXIT_APIC_WRITE */ hmR0VmxExitErrUndefined,
519 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
520 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
521 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
522 /* 60 VMX_EXIT_ENCLS */ hmR0VmxExitErrUndefined,
523 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
524 /* 62 VMX_EXIT_PML_FULL */ hmR0VmxExitErrUndefined,
525 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
526 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
527};
528#endif /* HMVMX_USE_FUNCTION_TABLE */
529
530#ifdef VBOX_STRICT
531static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
532{
533 /* 0 */ "(Not Used)",
534 /* 1 */ "VMCALL executed in VMX root operation.",
535 /* 2 */ "VMCLEAR with invalid physical address.",
536 /* 3 */ "VMCLEAR with VMXON pointer.",
537 /* 4 */ "VMLAUNCH with non-clear VMCS.",
538 /* 5 */ "VMRESUME with non-launched VMCS.",
539 /* 6 */ "VMRESUME after VMXOFF.",
540 /* 7 */ "VM-entry with invalid control fields.",
541 /* 8 */ "VM-entry with invalid host state fields.",
542 /* 9 */ "VMPTRLD with invalid physical address.",
543 /* 10 */ "VMPTRLD with VMXON pointer.",
544 /* 11 */ "VMPTRLD with incorrect revision identifier.",
545 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
546 /* 13 */ "VMWRITE to read-only VMCS component.",
547 /* 14 */ "(Not Used)",
548 /* 15 */ "VMXON executed in VMX root operation.",
549 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
550 /* 17 */ "VM-entry with non-launched executive VMCS.",
551 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
552 /* 19 */ "VMCALL with non-clear VMCS.",
553 /* 20 */ "VMCALL with invalid VM-exit control fields.",
554 /* 21 */ "(Not Used)",
555 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
556 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
557 /* 24 */ "VMCALL with invalid SMM-monitor features.",
558 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
559 /* 26 */ "VM-entry with events blocked by MOV SS.",
560 /* 27 */ "(Not Used)",
561 /* 28 */ "Invalid operand to INVEPT/INVVPID."
562};
563#endif /* VBOX_STRICT */
564
565
566
567/**
568 * Updates the VM's last error record.
569 *
570 * If there was a VMX instruction error, reads the error data from the VMCS and
571 * updates VCPU's last error record as well.
572 *
573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
574 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
575 * VERR_VMX_INVALID_VMCS_FIELD.
576 * @param rc The error code.
577 */
578static void hmR0VmxUpdateErrorRecord(PVMCPU pVCpu, int rc)
579{
580 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
581 || rc == VERR_VMX_UNABLE_TO_START_VM)
582 {
583 AssertPtrReturnVoid(pVCpu);
584 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
585 }
586 pVCpu->CTX_SUFF(pVM)->hm.s.lLastError = rc;
587}
588
589
590/**
591 * Reads the VM-entry interruption-information field from the VMCS into the VMX
592 * transient structure.
593 *
594 * @returns VBox status code.
595 * @param pVmxTransient Pointer to the VMX transient structure.
596 *
597 * @remarks No-long-jump zone!!!
598 */
599DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
600{
601 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
602 AssertRCReturn(rc, rc);
603 return VINF_SUCCESS;
604}
605
606#ifdef VBOX_STRICT
607/**
608 * Reads the VM-entry exception error code field from the VMCS into
609 * the VMX transient structure.
610 *
611 * @returns VBox status code.
612 * @param pVmxTransient Pointer to the VMX transient structure.
613 *
614 * @remarks No-long-jump zone!!!
615 */
616DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
617{
618 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
619 AssertRCReturn(rc, rc);
620 return VINF_SUCCESS;
621}
622
623
624/**
625 * Reads the VM-entry instruction length field from the VMCS into
626 * the VMX transient structure.
627 *
628 * @returns VBox status code.
629 * @param pVmxTransient Pointer to the VMX transient structure.
630 *
631 * @remarks No-long-jump zone!!!
632 */
633DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
634{
635 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
636 AssertRCReturn(rc, rc);
637 return VINF_SUCCESS;
638}
639#endif /* VBOX_STRICT */
640
641
642/**
643 * Reads the VM-exit interruption-information field from the VMCS into the VMX
644 * transient structure.
645 *
646 * @returns VBox status code.
647 * @param pVmxTransient Pointer to the VMX transient structure.
648 */
649DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
650{
651 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
652 {
653 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
654 AssertRCReturn(rc,rc);
655 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
656 }
657 return VINF_SUCCESS;
658}
659
660
661/**
662 * Reads the VM-exit interruption error code from the VMCS into the VMX
663 * transient structure.
664 *
665 * @returns VBox status code.
666 * @param pVmxTransient Pointer to the VMX transient structure.
667 */
668DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
669{
670 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
671 {
672 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
673 AssertRCReturn(rc, rc);
674 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
675 }
676 return VINF_SUCCESS;
677}
678
679
680/**
681 * Reads the VM-exit instruction length field from the VMCS into the VMX
682 * transient structure.
683 *
684 * @returns VBox status code.
685 * @param pVmxTransient Pointer to the VMX transient structure.
686 */
687DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
688{
689 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
690 {
691 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
692 AssertRCReturn(rc, rc);
693 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
694 }
695 return VINF_SUCCESS;
696}
697
698
699/**
700 * Reads the VM-exit instruction-information field from the VMCS into
701 * the VMX transient structure.
702 *
703 * @returns VBox status code.
704 * @param pVmxTransient Pointer to the VMX transient structure.
705 */
706DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
707{
708 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
709 {
710 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
711 AssertRCReturn(rc, rc);
712 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
713 }
714 return VINF_SUCCESS;
715}
716
717
718/**
719 * Reads the VM-exit qualification from the VMCS into the VMX transient
720 * structure.
721 *
722 * @returns VBox status code.
723 * @param pVCpu The cross context virtual CPU structure of the
724 * calling EMT. (Required for the VMCS cache case.)
725 * @param pVmxTransient Pointer to the VMX transient structure.
726 */
727DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
728{
729 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
730 {
731 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu);
732 AssertRCReturn(rc, rc);
733 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
734 }
735 return VINF_SUCCESS;
736}
737
738
739/**
740 * Reads the IDT-vectoring information field from the VMCS into the VMX
741 * transient structure.
742 *
743 * @returns VBox status code.
744 * @param pVmxTransient Pointer to the VMX transient structure.
745 *
746 * @remarks No-long-jump zone!!!
747 */
748DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
749{
750 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
751 {
752 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
753 AssertRCReturn(rc, rc);
754 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
755 }
756 return VINF_SUCCESS;
757}
758
759
760/**
761 * Reads the IDT-vectoring error code from the VMCS into the VMX
762 * transient structure.
763 *
764 * @returns VBox status code.
765 * @param pVmxTransient Pointer to the VMX transient structure.
766 */
767DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
768{
769 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
770 {
771 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
772 AssertRCReturn(rc, rc);
773 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
774 }
775 return VINF_SUCCESS;
776}
777
778
779/**
780 * Enters VMX root mode operation on the current CPU.
781 *
782 * @returns VBox status code.
783 * @param pVM The cross context VM structure. Can be
784 * NULL, after a resume.
785 * @param HCPhysCpuPage Physical address of the VMXON region.
786 * @param pvCpuPage Pointer to the VMXON region.
787 */
788static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
789{
790 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
791 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
792 Assert(pvCpuPage);
793 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
794
795 if (pVM)
796 {
797 /* Write the VMCS revision dword to the VMXON region. */
798 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
799 }
800
801 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
802 RTCCUINTREG fEFlags = ASMIntDisableFlags();
803
804 /* Enable the VMX bit in CR4 if necessary. */
805 RTCCUINTREG uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
806
807 /* Enter VMX root mode. */
808 int rc = VMXEnable(HCPhysCpuPage);
809 if (RT_FAILURE(rc))
810 {
811 if (!(uOldCr4 & X86_CR4_VMXE))
812 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
813
814 if (pVM)
815 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
816 }
817
818 /* Restore interrupts. */
819 ASMSetFlags(fEFlags);
820 return rc;
821}
822
823
824/**
825 * Exits VMX root mode operation on the current CPU.
826 *
827 * @returns VBox status code.
828 */
829static int hmR0VmxLeaveRootMode(void)
830{
831 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
832
833 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
834 RTCCUINTREG fEFlags = ASMIntDisableFlags();
835
836 /* If we're for some reason not in VMX root mode, then don't leave it. */
837 RTCCUINTREG uHostCR4 = ASMGetCR4();
838
839 int rc;
840 if (uHostCR4 & X86_CR4_VMXE)
841 {
842 /* Exit VMX root mode and clear the VMX bit in CR4. */
843 VMXDisable();
844 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
845 rc = VINF_SUCCESS;
846 }
847 else
848 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
849
850 /* Restore interrupts. */
851 ASMSetFlags(fEFlags);
852 return rc;
853}
854
855
856/**
857 * Allocates and maps one physically contiguous page. The allocated page is
858 * zeroed out. (Used by various VT-x structures).
859 *
860 * @returns IPRT status code.
861 * @param pMemObj Pointer to the ring-0 memory object.
862 * @param ppVirt Where to store the virtual address of the
863 * allocation.
864 * @param pHCPhys Where to store the physical address of the
865 * allocation.
866 */
867static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
868{
869 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
870 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
871 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
872
873 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
874 if (RT_FAILURE(rc))
875 return rc;
876 *ppVirt = RTR0MemObjAddress(*pMemObj);
877 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
878 ASMMemZero32(*ppVirt, PAGE_SIZE);
879 return VINF_SUCCESS;
880}
881
882
883/**
884 * Frees and unmaps an allocated physical page.
885 *
886 * @param pMemObj Pointer to the ring-0 memory object.
887 * @param ppVirt Where to re-initialize the virtual address of
888 * allocation as 0.
889 * @param pHCPhys Where to re-initialize the physical address of the
890 * allocation as 0.
891 */
892static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
893{
894 AssertPtr(pMemObj);
895 AssertPtr(ppVirt);
896 AssertPtr(pHCPhys);
897 if (*pMemObj != NIL_RTR0MEMOBJ)
898 {
899 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
900 AssertRC(rc);
901 *pMemObj = NIL_RTR0MEMOBJ;
902 *ppVirt = 0;
903 *pHCPhys = 0;
904 }
905}
906
907
908/**
909 * Worker function to free VT-x related structures.
910 *
911 * @returns IPRT status code.
912 * @param pVM The cross context VM structure.
913 */
914static void hmR0VmxStructsFree(PVM pVM)
915{
916 for (VMCPUID i = 0; i < pVM->cCpus; i++)
917 {
918 PVMCPU pVCpu = &pVM->aCpus[i];
919 AssertPtr(pVCpu);
920
921 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
922 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
923
924 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
925 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
926
927 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
928 }
929
930 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
931#ifdef VBOX_WITH_CRASHDUMP_MAGIC
932 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
933#endif
934}
935
936
937/**
938 * Worker function to allocate VT-x related VM structures.
939 *
940 * @returns IPRT status code.
941 * @param pVM The cross context VM structure.
942 */
943static int hmR0VmxStructsAlloc(PVM pVM)
944{
945 /*
946 * Initialize members up-front so we can cleanup properly on allocation failure.
947 */
948#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
949 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
950 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
951 pVM->hm.s.vmx.HCPhys##a_Name = 0;
952
953#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
954 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
955 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
956 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
957
958#ifdef VBOX_WITH_CRASHDUMP_MAGIC
959 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
960#endif
961 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
962
963 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
964 for (VMCPUID i = 0; i < pVM->cCpus; i++)
965 {
966 PVMCPU pVCpu = &pVM->aCpus[i];
967 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
968 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
969 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
970 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
971 }
972#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
973#undef VMXLOCAL_INIT_VM_MEMOBJ
974
975 /* The VMCS size cannot be more than 4096 bytes. See Intel spec. Appendix A.1 "Basic VMX Information". */
976 AssertReturnStmt(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.Msrs.u64BasicInfo) <= PAGE_SIZE,
977 (&pVM->aCpus[0])->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE,
978 VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO);
979
980 /*
981 * Allocate all the VT-x structures.
982 */
983 int rc = VINF_SUCCESS;
984#ifdef VBOX_WITH_CRASHDUMP_MAGIC
985 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
986 if (RT_FAILURE(rc))
987 goto cleanup;
988 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
989 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
990#endif
991
992 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
993 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
994 {
995 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
996 &pVM->hm.s.vmx.HCPhysApicAccess);
997 if (RT_FAILURE(rc))
998 goto cleanup;
999 }
1000
1001 /*
1002 * Initialize per-VCPU VT-x structures.
1003 */
1004 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1005 {
1006 PVMCPU pVCpu = &pVM->aCpus[i];
1007 AssertPtr(pVCpu);
1008
1009 /* Allocate the VM control structure (VMCS). */
1010 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
1011 if (RT_FAILURE(rc))
1012 goto cleanup;
1013
1014 /* Get the allocated virtual-APIC page from the APIC device for transparent TPR accesses. */
1015 if ( PDMHasApic(pVM)
1016 && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW))
1017 {
1018 rc = APICGetApicPageForCpu(pVCpu, &pVCpu->hm.s.vmx.HCPhysVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
1019 NULL /* pR3Ptr */, NULL /* pRCPtr */);
1020 if (RT_FAILURE(rc))
1021 goto cleanup;
1022 }
1023
1024 /*
1025 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
1026 * transparent accesses of specific MSRs.
1027 *
1028 * If the condition for enabling MSR bitmaps changes here, don't forget to
1029 * update HMAreMsrBitmapsAvailable().
1030 */
1031 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1032 {
1033 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
1034 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1035 if (RT_FAILURE(rc))
1036 goto cleanup;
1037 ASMMemFill32(pVCpu->hm.s.vmx.pvMsrBitmap, PAGE_SIZE, UINT32_C(0xffffffff));
1038 }
1039
1040 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
1041 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
1042 if (RT_FAILURE(rc))
1043 goto cleanup;
1044
1045 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1046 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
1047 if (RT_FAILURE(rc))
1048 goto cleanup;
1049 }
1050
1051 return VINF_SUCCESS;
1052
1053cleanup:
1054 hmR0VmxStructsFree(pVM);
1055 return rc;
1056}
1057
1058
1059/**
1060 * Does global VT-x initialization (called during module initialization).
1061 *
1062 * @returns VBox status code.
1063 */
1064VMMR0DECL(int) VMXR0GlobalInit(void)
1065{
1066#ifdef HMVMX_USE_FUNCTION_TABLE
1067 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
1068# ifdef VBOX_STRICT
1069 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
1070 Assert(g_apfnVMExitHandlers[i]);
1071# endif
1072#endif
1073 return VINF_SUCCESS;
1074}
1075
1076
1077/**
1078 * Does global VT-x termination (called during module termination).
1079 */
1080VMMR0DECL(void) VMXR0GlobalTerm()
1081{
1082 /* Nothing to do currently. */
1083}
1084
1085
1086/**
1087 * Sets up and activates VT-x on the current CPU.
1088 *
1089 * @returns VBox status code.
1090 * @param pHostCpu Pointer to the global CPU info struct.
1091 * @param pVM The cross context VM structure. Can be
1092 * NULL after a host resume operation.
1093 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1094 * fEnabledByHost is @c true).
1095 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1096 * @a fEnabledByHost is @c true).
1097 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1098 * enable VT-x on the host.
1099 * @param pvMsrs Opaque pointer to VMXMSRS struct.
1100 */
1101VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
1102 void *pvMsrs)
1103{
1104 Assert(pHostCpu);
1105 Assert(pvMsrs);
1106 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1107
1108 /* Enable VT-x if it's not already enabled by the host. */
1109 if (!fEnabledByHost)
1110 {
1111 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1112 if (RT_FAILURE(rc))
1113 return rc;
1114 }
1115
1116 /*
1117 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been
1118 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
1119 * invalidated when flushing by VPID.
1120 */
1121 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
1122 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1123 {
1124 hmR0VmxFlushEpt(NULL /* pVCpu */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
1125 pHostCpu->fFlushAsidBeforeUse = false;
1126 }
1127 else
1128 pHostCpu->fFlushAsidBeforeUse = true;
1129
1130 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1131 ++pHostCpu->cTlbFlushes;
1132
1133 return VINF_SUCCESS;
1134}
1135
1136
1137/**
1138 * Deactivates VT-x on the current CPU.
1139 *
1140 * @returns VBox status code.
1141 * @param pHostCpu Pointer to the global CPU info struct.
1142 * @param pvCpuPage Pointer to the VMXON region.
1143 * @param HCPhysCpuPage Physical address of the VMXON region.
1144 *
1145 * @remarks This function should never be called when SUPR0EnableVTx() or
1146 * similar was used to enable VT-x on the host.
1147 */
1148VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1149{
1150 RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);
1151
1152 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1153 return hmR0VmxLeaveRootMode();
1154}
1155
1156
1157/**
1158 * Sets the permission bits for the specified MSR in the MSR bitmap.
1159 *
1160 * @param pVCpu The cross context virtual CPU structure.
1161 * @param uMsr The MSR value.
1162 * @param enmRead Whether reading this MSR causes a VM-exit.
1163 * @param enmWrite Whether writing this MSR causes a VM-exit.
1164 */
1165static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1166{
1167 int32_t iBit;
1168 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1169
1170 /*
1171 * Layout:
1172 * 0x000 - 0x3ff - Low MSR read bits
1173 * 0x400 - 0x7ff - High MSR read bits
1174 * 0x800 - 0xbff - Low MSR write bits
1175 * 0xc00 - 0xfff - High MSR write bits
1176 */
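    /*
     * Editorial note: worked example of the layout above (assumption based on the
     * code below). For MSR_K6_EFER (0xC0000080) iBit becomes 0x80 and pbMsrBitmap
     * is advanced by 0x400 (high read area); the matching write bit sits a further
     * 0x800 bytes in, i.e. at offset 0xC00.
     */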
1177 if (uMsr <= 0x00001FFF)
1178 iBit = uMsr;
1179 else if (uMsr - UINT32_C(0xC0000000) <= UINT32_C(0x00001FFF))
1180 {
1181 iBit = uMsr - UINT32_C(0xC0000000);
1182 pbMsrBitmap += 0x400;
1183 }
1184 else
1185 AssertMsgFailedReturnVoid(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1186
1187 Assert(iBit <= 0x1fff);
1188 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1189 ASMBitSet(pbMsrBitmap, iBit);
1190 else
1191 ASMBitClear(pbMsrBitmap, iBit);
1192
1193 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1194 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1195 else
1196 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1197}
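/*
 * Editorial note: an illustrative call sketch (not part of the original file)
 * showing how an MSR is made pass-through for the guest; MSR_K8_TSC_AUX is
 * used purely as an example value.
 */
#if 0
    hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
#endif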
1198
1199
1200#ifdef VBOX_STRICT
1201/**
1202 * Gets the permission bits for the specified MSR in the MSR bitmap.
1203 *
1204 * @returns VBox status code.
1205 * @retval VINF_SUCCESS if the specified MSR is found.
1206 * @retval VERR_NOT_FOUND if the specified MSR is not found.
1207 * @retval VERR_NOT_SUPPORTED if VT-x doesn't allow the MSR.
1208 *
1209 * @param pVCpu The cross context virtual CPU structure.
1210 * @param uMsr The MSR.
1211 * @param penmRead Where to store the read permissions.
1212 * @param penmWrite Where to store the write permissions.
1213 */
1214static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
1215{
1216 AssertPtrReturn(penmRead, VERR_INVALID_PARAMETER);
1217 AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
1218 int32_t iBit;
1219 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1220
1221 /* See hmR0VmxSetMsrPermission() for the layout. */
1222 if (uMsr <= 0x00001FFF)
1223 iBit = uMsr;
1224 else if ( uMsr >= 0xC0000000
1225 && uMsr <= 0xC0001FFF)
1226 {
1227 iBit = (uMsr - 0xC0000000);
1228 pbMsrBitmap += 0x400;
1229 }
1230 else
1231 AssertMsgFailedReturn(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr), VERR_NOT_SUPPORTED);
1232
1233 Assert(iBit <= 0x1fff);
1234 if (ASMBitTest(pbMsrBitmap, iBit))
1235 *penmRead = VMXMSREXIT_INTERCEPT_READ;
1236 else
1237 *penmRead = VMXMSREXIT_PASSTHRU_READ;
1238
1239 if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
1240 *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
1241 else
1242 *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
1243 return VINF_SUCCESS;
1244}
1245#endif /* VBOX_STRICT */
1246
1247
1248/**
1249 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1250 * area.
1251 *
1252 * @returns VBox status code.
1253 * @param pVCpu The cross context virtual CPU structure.
1254 * @param cMsrs The number of MSRs.
1255 */
1256static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
1257{
1258 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1259 uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1260 if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
1261 {
1262 LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
1263 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1264 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1265 }
1266
1267 /* Update number of guest MSRs to load/store across the world-switch. */
1268 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
1269 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
1270
1271 /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
1272 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
1273 AssertRCReturn(rc, rc);
1274
1275 /* Update the VCPU's copy of the MSR count. */
1276 pVCpu->hm.s.vmx.cMsrs = cMsrs;
1277
1278 return VINF_SUCCESS;
1279}
1280
1281
1282/**
1283 * Adds a new (or updates the value of an existing) guest/host MSR
1284 * pair to be swapped during the world-switch as part of the
1285 * auto-load/store MSR area in the VMCS.
1286 *
1287 * @returns VBox status code.
1288 * @param pVCpu The cross context virtual CPU structure.
1289 * @param uMsr The MSR.
1290 * @param uGuestMsrValue Value of the guest MSR.
1291 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1292 * necessary.
1293 * @param pfAddedAndUpdated Where to store whether the MSR was added -and-
1294 * its value was updated. Optional, can be NULL.
1295 */
1296static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr,
1297 bool *pfAddedAndUpdated)
1298{
1299 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1300 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1301 uint32_t i;
1302 for (i = 0; i < cMsrs; i++)
1303 {
1304 if (pGuestMsr->u32Msr == uMsr)
1305 break;
1306 pGuestMsr++;
1307 }
1308
1309 bool fAdded = false;
1310 if (i == cMsrs)
1311 {
1312 ++cMsrs;
1313 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1314 AssertMsgRCReturn(rc, ("hmR0VmxAddAutoLoadStoreMsr: Insufficient space to add MSR %u\n", uMsr), rc);
1315
1316 /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
1317 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1318 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1319
1320 fAdded = true;
1321 }
1322
1323 /* Update the MSR values in the auto-load/store MSR area. */
1324 pGuestMsr->u32Msr = uMsr;
1325 pGuestMsr->u64Value = uGuestMsrValue;
1326
1327 /* Create/update the MSR slot in the host MSR area. */
1328 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1329 pHostMsr += i;
1330 pHostMsr->u32Msr = uMsr;
1331
1332 /*
1333 * Update the host MSR only when requested by the caller AND when we're
1334 * adding it to the auto-load/store area. Otherwise, it would have been
1335 * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons.
1336 */
1337 bool fUpdatedMsrValue = false;
1338 if ( fAdded
1339 && fUpdateHostMsr)
1340 {
1341 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1342 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1343 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1344 fUpdatedMsrValue = true;
1345 }
1346
1347 if (pfAddedAndUpdated)
1348 *pfAddedAndUpdated = fUpdatedMsrValue;
1349 return VINF_SUCCESS;
1350}
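/*
 * Editorial note: an illustrative call sketch (not part of the original file)
 * showing how a guest/host MSR pair is added to the auto-load/store area;
 * uGuestTscAux is a hypothetical guest value supplied by the caller.
 */
#if 0
    bool fAddedAndUpdated;
    int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, uGuestTscAux, false /* fUpdateHostMsr */, &fAddedAndUpdated);
    AssertRCReturn(rc, rc);
#endif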
1351
1352
1353/**
1354 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1355 * auto-load/store MSR area in the VMCS.
1356 *
1357 * @returns VBox status code.
1358 * @param pVCpu The cross context virtual CPU structure.
1359 * @param uMsr The MSR.
1360 */
1361static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
1362{
1363 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1364 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1365 for (uint32_t i = 0; i < cMsrs; i++)
1366 {
1367 /* Find the MSR. */
1368 if (pGuestMsr->u32Msr == uMsr)
1369 {
1370 /* If it's the last MSR, simply reduce the count. */
1371 if (i == cMsrs - 1)
1372 {
1373 --cMsrs;
1374 break;
1375 }
1376
1377 /* Remove it by swapping the last MSR in place of it, and reducing the count. */
1378 PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1379 pLastGuestMsr += cMsrs - 1;
1380 pGuestMsr->u32Msr = pLastGuestMsr->u32Msr;
1381 pGuestMsr->u64Value = pLastGuestMsr->u64Value;
1382
1383 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1384 PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1385 pLastHostMsr += cMsrs - 1;
1386 pHostMsr->u32Msr = pLastHostMsr->u32Msr;
1387 pHostMsr->u64Value = pLastHostMsr->u64Value;
1388 --cMsrs;
1389 break;
1390 }
1391 pGuestMsr++;
1392 }
1393
1394 /* Update the VMCS if the count changed (meaning the MSR was found). */
1395 if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
1396 {
1397 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
1398 AssertRCReturn(rc, rc);
1399
1400 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1401 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1402 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
1403
1404 Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
1405 return VINF_SUCCESS;
1406 }
1407
1408 return VERR_NOT_FOUND;
1409}
1410
1411
1412/**
1413 * Checks if the specified guest MSR is part of the auto-load/store area in
1414 * the VMCS.
1415 *
1416 * @returns true if found, false otherwise.
1417 * @param pVCpu The cross context virtual CPU structure.
1418 * @param uMsr The MSR to find.
1419 */
1420static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1421{
1422 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1423 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1424
1425 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
1426 {
1427 if (pGuestMsr->u32Msr == uMsr)
1428 return true;
1429 }
1430 return false;
1431}
1432
1433
1434/**
1435 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
1436 *
1437 * @param pVCpu The cross context virtual CPU structure.
1438 *
1439 * @remarks No-long-jump zone!!!
1440 */
1441static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
1442{
1443 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1444 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1445 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1446 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;
1447
1448 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1449 {
1450 AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
1451
1452 /*
1453 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1454 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1455 */
1456 if (pHostMsr->u32Msr == MSR_K6_EFER)
1457 pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
1458 else
1459 pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
1460 }
1461
1462 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
1463}
1464
1465
1466/**
1467 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1468 * perform lazy restoration of the host MSRs while leaving VT-x.
1469 *
1470 * @param pVCpu The cross context virtual CPU structure.
1471 *
1472 * @remarks No-long-jump zone!!!
1473 */
1474static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1475{
1476 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1477
1478 /*
1479 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
1480 */
1481 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
1482 {
1483 Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. */
1484#if HC_ARCH_BITS == 64
1485 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1486 {
1487 pVCpu->hm.s.vmx.u64HostLStarMsr = ASMRdMsr(MSR_K8_LSTAR);
1488 pVCpu->hm.s.vmx.u64HostStarMsr = ASMRdMsr(MSR_K6_STAR);
1489 pVCpu->hm.s.vmx.u64HostSFMaskMsr = ASMRdMsr(MSR_K8_SF_MASK);
1490 pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1491 }
1492#endif
1493 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1494 }
1495}
1496
1497
1498/**
1499 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1500 * lazily while leaving VT-x.
1501 *
1502 * @returns true if it does, false otherwise.
1503 * @param pVCpu The cross context virtual CPU structure.
1504 * @param uMsr The MSR to check.
1505 */
1506static bool hmR0VmxIsLazyGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
1507{
1508 NOREF(pVCpu);
1509#if HC_ARCH_BITS == 64
1510 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1511 {
1512 switch (uMsr)
1513 {
1514 case MSR_K8_LSTAR:
1515 case MSR_K6_STAR:
1516 case MSR_K8_SF_MASK:
1517 case MSR_K8_KERNEL_GS_BASE:
1518 return true;
1519 }
1520 }
1521#else
1522 RT_NOREF(pVCpu, uMsr);
1523#endif
1524 return false;
1525}
1526
1527
1528/**
1529 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1530 *
1531 * The name of this function is slightly confusing. This function does NOT
1532 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1533 * common prefix for functions dealing with "lazy restoration" of the shared
1534 * MSRs.
1535 *
1536 * @param pVCpu The cross context virtual CPU structure.
1537 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
1538 * out-of-sync. Make sure to update the required fields
1539 * before using them.
1540 *
1541 * @remarks No-long-jump zone!!!
1542 */
1543static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
1544{
1545 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1546 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1547
1548 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1549#if HC_ARCH_BITS == 64
1550 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1551 {
1552 /*
1553 * If the guest MSRs are not loaded -and- if all the guest MSRs are identical
1554 * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then
1555 * we can skip a few MSR writes.
1556 *
1557         * Otherwise, either 1. they're not loaded but differ from the host values currently on
1558         * the CPU, or 2. they're loaded but the guest MSR values in the guest-CPU context might
1559         * be different from what's currently loaded on the CPU. In either case, we need to write
1560         * the new guest MSR values to the CPU, see @bugref{8728}.
1561 */
1562 if ( !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1563 && pMixedCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr
1564 && pMixedCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostLStarMsr
1565 && pMixedCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostStarMsr
1566 && pMixedCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostSFMaskMsr)
1567 {
1568#ifdef VBOX_STRICT
1569 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pMixedCtx->msrKERNELGSBASE);
1570 Assert(ASMRdMsr(MSR_K8_LSTAR) == pMixedCtx->msrLSTAR);
1571 Assert(ASMRdMsr(MSR_K6_STAR) == pMixedCtx->msrSTAR);
1572 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pMixedCtx->msrSFMASK);
1573#endif
1574 }
1575 else
1576 {
1577 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
1578 ASMWrMsr(MSR_K8_LSTAR, pMixedCtx->msrLSTAR);
1579 ASMWrMsr(MSR_K6_STAR, pMixedCtx->msrSTAR);
1580 ASMWrMsr(MSR_K8_SF_MASK, pMixedCtx->msrSFMASK);
1581 }
1582 }
1583#else
1584 RT_NOREF(pMixedCtx);
1585#endif
1586 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
1587}
1588
1589
1590/**
1591 * Performs lazy restoration of the set of host MSRs if they were previously
1592 * loaded with guest MSR values.
1593 *
1594 * @param pVCpu The cross context virtual CPU structure.
1595 *
1596 * @remarks No-long-jump zone!!!
1597 * @remarks The guest MSRs should have been saved back into the guest-CPU
1598 * context by hmR0VmxImportGuestState()!!!
1599 */
1600static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
1601{
1602 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1603 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1604
1605 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1606 {
1607 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1608#if HC_ARCH_BITS == 64
1609 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1610 {
1611 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostLStarMsr);
1612 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostStarMsr);
1613 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostSFMaskMsr);
1614 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
1615 }
1616#endif
1617 }
1618 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
1619}
1620
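/*
 * Illustrative sketch (not compiled): the intended ordering of the lazy MSR helpers
 * above, as tracked by the fLazyMsrs flags. VMX_LAZY_MSRS_SAVED_HOST must be set before
 * the guest values are loaded, and the guest values must have been read back into the
 * guest-CPU context before the host values are restored. The function below is
 * hypothetical and only documents the sequence; the real call sites live elsewhere in
 * this file.
 */
#if 0
static void hmR0VmxSketchLazyMsrLifecycle(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
{
    hmR0VmxLazySaveHostMsrs(pVCpu);                /* 1. Cache the host LSTAR/STAR/SF_MASK/KERNEL_GS_BASE values. */
    hmR0VmxLazyLoadGuestMsrs(pVCpu, pMixedCtx);    /* 2. Put the guest values on the CPU (skipped if identical). */
    /* ... run guest code; the guest reads/writes these MSRs without VM-exits ... */
    /* 3. hmR0VmxImportGuestState() reads the current values back into pMixedCtx. */
    hmR0VmxLazyRestoreHostMsrs(pVCpu);             /* 4. Put the host values back before leaving VT-x. */
}
#endif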
1621
1622/**
1623 * Verifies that our cached values of the VMCS fields are all consistent with
1624 * what's actually present in the VMCS.
1625 *
1626 * @returns VBox status code.
1627 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
1628 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
1629 * VMCS content. HMCPU error-field is
1630 * updated, see VMX_VCI_XXX.
1631 * @param pVCpu The cross context virtual CPU structure.
1632 */
1633static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
1634{
1635 uint32_t u32Val;
1636 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
1637 AssertRCReturn(rc, rc);
1638 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32EntryCtls == u32Val,
1639 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
1640 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
1641 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1642
1643 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
1644 AssertRCReturn(rc, rc);
1645 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ExitCtls == u32Val,
1646 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
1647 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
1648 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1649
1650 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
1651 AssertRCReturn(rc, rc);
1652 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32PinCtls == u32Val,
1653 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
1654 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
1655 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1656
1657 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
1658 AssertRCReturn(rc, rc);
1659 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ProcCtls == u32Val,
1660 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
1661 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
1662 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1663
1664 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1665 {
1666 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
1667 AssertRCReturn(rc, rc);
1668 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val,
1669 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
1670 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
1671 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1672 }
1673
1674 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
1675 AssertRCReturn(rc, rc);
1676 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u32XcptBitmap == u32Val,
1677 ("Cache=%#RX32 VMCS=%#RX32\n", pVCpu->hm.s.vmx.u32XcptBitmap, u32Val),
1678 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
1679 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1680
1681 uint64_t u64Val;
1682 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
1683 AssertRCReturn(rc, rc);
1684 AssertMsgReturnStmt(pVCpu->hm.s.vmx.u64TscOffset == u64Val,
1685 ("Cache=%#RX64 VMCS=%#RX64\n", pVCpu->hm.s.vmx.u64TscOffset, u64Val),
1686 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
1687 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
1688
1689 return VINF_SUCCESS;
1690}
1691
1692
1693#ifdef VBOX_STRICT
1694/**
1695 * Verifies that our cached host EFER value has not changed
1696 * since we cached it.
1697 *
1698 * @param pVCpu The cross context virtual CPU structure.
1699 */
1700static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
1701{
1702 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1703
1704 if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
1705 {
1706 uint64_t u64Val;
1707 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &u64Val);
1708 AssertRC(rc);
1709
1710 uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
1711 AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
1712 }
1713}
1714
1715
1716/**
1717 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
1718 * VMCS are correct.
1719 *
1720 * @param pVCpu The cross context virtual CPU structure.
1721 */
1722static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
1723{
1724 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1725
1726    /* Verify MSR counts in the VMCS are what we think they should be. */
1727 uint32_t cMsrs;
1728 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1729 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1730
1731 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs); AssertRC(rc);
1732 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1733
1734 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs); AssertRC(rc);
1735 Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
1736
1737 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
1738 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
1739 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
1740 {
1741 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
1742 AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
1743 pGuestMsr->u32Msr, cMsrs));
1744
1745 uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
1746 AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
1747 pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
1748
1749 /* Verify that the permissions are as expected in the MSR bitmap. */
1750 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1751 {
1752 VMXMSREXITREAD enmRead;
1753 VMXMSREXITWRITE enmWrite;
1754 rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
1755            AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission failed! rc=%Rrc\n", rc));
1756 if (pGuestMsr->u32Msr == MSR_K6_EFER)
1757 {
1758 AssertMsgReturnVoid(enmRead == VMXMSREXIT_INTERCEPT_READ, ("Passthru read for EFER!?\n"));
1759 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
1760 }
1761 else
1762 {
1763 AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
1764 pGuestMsr->u32Msr, cMsrs));
1765 AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
1766 pGuestMsr->u32Msr, cMsrs));
1767 }
1768 }
1769 }
1770}
1771#endif /* VBOX_STRICT */
1772
1773
1774/**
1775 * Flushes the TLB using EPT.
1776 *
1777 * @returns VBox status code.
1778 * @param pVCpu The cross context virtual CPU structure of the calling
1779 * EMT. Can be NULL depending on @a enmTlbFlush.
1780 * @param enmTlbFlush Type of flush.
1781 *
1782 * @remarks Caller is responsible for making sure this function is called only
1783 * when NestedPaging is supported and providing @a enmTlbFlush that is
1784 * supported by the CPU.
1785 * @remarks Can be called with interrupts disabled.
1786 */
1787static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMXTLBFLUSHEPT enmTlbFlush)
1788{
1789 uint64_t au64Descriptor[2];
1790 if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS)
1791 au64Descriptor[0] = 0;
1792 else
1793 {
1794 Assert(pVCpu);
1795 au64Descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1796 }
1797 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1798
1799 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
1800 AssertMsg(rc == VINF_SUCCESS,
1801 ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0, rc));
1802
1803 if ( RT_SUCCESS(rc)
1804 && pVCpu)
1805 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1806}
1807
1808
1809/**
1810 * Flushes the TLB using VPID.
1811 *
1812 * @returns VBox status code.
1813 * @param pVCpu The cross context virtual CPU structure of the calling
1814 * EMT. Can be NULL depending on @a enmTlbFlush.
1815 * @param enmTlbFlush Type of flush.
1816 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1817 * on @a enmTlbFlush).
1818 *
1819 * @remarks Can be called with interrupts disabled.
1820 */
1821static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr)
1822{
1823 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid);
1824
1825 uint64_t au64Descriptor[2];
1826 if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS)
1827 {
1828 au64Descriptor[0] = 0;
1829 au64Descriptor[1] = 0;
1830 }
1831 else
1832 {
1833 AssertPtr(pVCpu);
1834 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1835 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1836 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1837 au64Descriptor[1] = GCPtr;
1838 }
1839
1840 int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
1841 AssertMsg(rc == VINF_SUCCESS,
1842 ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1843
1844 if ( RT_SUCCESS(rc)
1845 && pVCpu)
1846 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1847 NOREF(rc);
1848}
1849
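/*
 * Illustrative sketch (not compiled): the 128-bit descriptors handed to INVEPT and
 * INVVPID by the two helpers above, which simply use a uint64_t[2]. The struct names
 * are hypothetical and only document the layout (see the "VMX Instructions" chapter of
 * the Intel spec, as cited above).
 */
#if 0
typedef struct INVEPTDESCSKETCH
{
    uint64_t u64Eptp;       /* Bits 63:0   - EPT pointer (ignored for all-contexts flushes). */
    uint64_t u64Reserved;   /* Bits 127:64 - reserved, MBZ. */
} INVEPTDESCSKETCH;

typedef struct INVVPIDDESCSKETCH
{
    uint64_t u64Vpid;       /* Bits 15:0   - VPID; bits 63:16 reserved, MBZ. */
    uint64_t u64LinearAddr; /* Bits 127:64 - linear address (individual-address flushes only). */
} INVVPIDDESCSKETCH;
#endif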
1850
1851/**
1852 * Invalidates a guest page by guest virtual address. Only relevant for
1853 * EPT/VPID, otherwise there is nothing really to invalidate.
1854 *
1855 * @returns VBox status code.
1856 * @param pVCpu The cross context virtual CPU structure.
1857 * @param GCVirt Guest virtual address of the page to invalidate.
1858 */
1859VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
1860{
1861 AssertPtr(pVCpu);
1862 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
1863
1864 bool fFlushPending = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH);
1865 if (!fFlushPending)
1866 {
1867 /*
1868         * We must invalidate the guest TLB entry in either case; we cannot ignore it even for
1869 * the EPT case. See @bugref{6043} and @bugref{6177}.
1870 *
1871 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
1872         * as this function may be called in a loop with individual addresses.
1873 */
1874 PVM pVM = pVCpu->CTX_SUFF(pVM);
1875 if (pVM->hm.s.vmx.fVpid)
1876 {
1877 bool fVpidFlush = RT_BOOL(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
1878
1879#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
1880 /*
1881             * Work around errata BV75, AAJ159 and others that affect several Intel CPUs
1882 * where executing INVVPID outside 64-bit mode does not flush translations of
1883 * 64-bit linear addresses, see @bugref{6208#c72}.
1884 */
1885 if (RT_HI_U32(GCVirt))
1886 fVpidFlush = false;
1887#endif
1888
1889 if (fVpidFlush)
1890 {
1891 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_INDIV_ADDR, GCVirt);
1892 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1893 }
1894 else
1895 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1896 }
1897 else if (pVM->hm.s.fNestedPaging)
1898 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1899 }
1900
1901 return VINF_SUCCESS;
1902}
1903
1904
1905/**
1906 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1907 * case where neither EPT nor VPID is supported by the CPU.
1908 *
1909 * @param pVCpu The cross context virtual CPU structure.
1910 * @param pCpu Pointer to the global HM struct.
1911 *
1912 * @remarks Called with interrupts disabled.
1913 */
1914static void hmR0VmxFlushTaggedTlbNone(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1915{
1916 AssertPtr(pVCpu);
1917 AssertPtr(pCpu);
1918
1919 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1920
1921 Assert(pCpu->idCpu != NIL_RTCPUID);
1922 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1923 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1924 pVCpu->hm.s.fForceTLBFlush = false;
1925 return;
1926}
1927
1928
1929/**
1930 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1931 *
1932 * @param pVCpu The cross context virtual CPU structure.
1933 * @param pCpu Pointer to the global HM CPU struct.
1934 *
1935 * @remarks All references to "ASID" in this function pertain to "VPID" in Intel's
1936 *          nomenclature; we keep the "ASID" naming here to avoid confusion in the
1937 *          comparisons, since the host-CPU copies are named "ASID".
1938 *
1939 * @remarks Called with interrupts disabled.
1940 */
1941static void hmR0VmxFlushTaggedTlbBoth(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
1942{
1943#ifdef VBOX_WITH_STATISTICS
1944 bool fTlbFlushed = false;
1945# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1946# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1947 if (!fTlbFlushed) \
1948 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1949 } while (0)
1950#else
1951# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1952# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1953#endif
1954
1955 AssertPtr(pCpu);
1956 AssertPtr(pVCpu);
1957 Assert(pCpu->idCpu != NIL_RTCPUID);
1958
1959 PVM pVM = pVCpu->CTX_SUFF(pVM);
1960 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1961 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1962 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1963
1964 /*
1965 * Force a TLB flush for the first world-switch if the current CPU differs from the one we
1966 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
1967 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
1968 * cannot reuse the current ASID anymore.
1969 */
1970 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1971 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1972 {
1973 ++pCpu->uCurrentAsid;
1974 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1975 {
1976 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1977 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1978 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1979 }
1980
1981 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1982 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1983 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1984
1985 /*
1986 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1987 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1988 */
1989 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
1990 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1991 HMVMX_SET_TAGGED_TLB_FLUSHED();
1992 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1993 }
1994
1995 /* Check for explicit TLB flushes. */
1996 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1997 {
1998 /*
1999         * Changes to the EPT paging structure by the VMM require flushing-by-EPT as the CPU
2000         * creates guest-physical (i.e. only EPT-tagged) mappings while traversing the EPT
2001 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
2002 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
2003 * mappings, see @bugref{6568}.
2004 *
2005 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
2006 */
2007 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmTlbFlushEpt);
2008 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2009 HMVMX_SET_TAGGED_TLB_FLUSHED();
2010 }
2011
2012 pVCpu->hm.s.fForceTLBFlush = false;
2013 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
2014
2015 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
2016 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
2017 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2018 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2019 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2020 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2021 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2022 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2023 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2024
2025 /* Update VMCS with the VPID. */
2026 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2027 AssertRC(rc);
2028
2029#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2030}
2031
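/*
 * Illustrative sketch (not compiled): the conditions under which the function above
 * ends up issuing an INVEPT. The helper name is hypothetical; it merely condenses the
 * two checks performed by the real code.
 */
#if 0
static bool hmR0VmxSketchNeedsEptFlush(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
{
    return pVCpu->hm.s.idLastCpu   != pCpu->idCpu          /* Rescheduled to a different host CPU. */
        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes    /* VPID wraparound or host suspend/resume. */
        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_FLUSH); /* Explicit flush requested (e.g. EPT tables changed). */
}
#endif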
2032
2033/**
2034 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2035 *
2036 * @returns VBox status code.
2037 * @param pVCpu The cross context virtual CPU structure.
2038 * @param pCpu Pointer to the global HM CPU struct.
2039 *
2040 * @remarks Called with interrupts disabled.
2041 */
2042static void hmR0VmxFlushTaggedTlbEpt(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2043{
2044 AssertPtr(pVCpu);
2045 AssertPtr(pCpu);
2046 Assert(pCpu->idCpu != NIL_RTCPUID);
2047 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
2048 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
2049
2050 /*
2051 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2052 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2053 */
2054 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2055 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2056 {
2057 pVCpu->hm.s.fForceTLBFlush = true;
2058 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2059 }
2060
2061 /* Check for explicit TLB flushes. */
2062 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2063 {
2064 pVCpu->hm.s.fForceTLBFlush = true;
2065 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2066 }
2067
2068 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2069 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2070
2071 if (pVCpu->hm.s.fForceTLBFlush)
2072 {
2073 hmR0VmxFlushEpt(pVCpu, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
2074 pVCpu->hm.s.fForceTLBFlush = false;
2075 }
2076}
2077
2078
2079/**
2080 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2081 *
2082 * @returns VBox status code.
2083 * @param pVCpu The cross context virtual CPU structure.
2084 * @param pCpu Pointer to the global HM CPU struct.
2085 *
2086 * @remarks Called with interrupts disabled.
2087 */
2088static void hmR0VmxFlushTaggedTlbVpid(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2089{
2090 AssertPtr(pVCpu);
2091 AssertPtr(pCpu);
2092 Assert(pCpu->idCpu != NIL_RTCPUID);
2093 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID."));
2094 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging"));
2095
2096 /*
2097 * Force a TLB flush for the first world switch if the current CPU differs from the one we
2098 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
2099 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
2100 * cannot reuse the current ASID anymore.
2101 */
2102 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2103 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2104 {
2105 pVCpu->hm.s.fForceTLBFlush = true;
2106 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2107 }
2108
2109 /* Check for explicit TLB flushes. */
2110 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2111 {
2112 /*
2113 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
2114 * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
2115 * fExplicitFlush = true here and change the pCpu->fFlushAsidBeforeUse check below to
2116 * include fExplicitFlush's too) - an obscure corner case.
2117 */
2118 pVCpu->hm.s.fForceTLBFlush = true;
2119 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2120 }
2121
2122 PVM pVM = pVCpu->CTX_SUFF(pVM);
2123 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2124 if (pVCpu->hm.s.fForceTLBFlush)
2125 {
2126 ++pCpu->uCurrentAsid;
2127 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2128 {
2129 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2130 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2131 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2132 }
2133
2134 pVCpu->hm.s.fForceTLBFlush = false;
2135 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2136 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2137 if (pCpu->fFlushAsidBeforeUse)
2138 {
2139 if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
2140 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2141 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2142 {
2143 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2144 pCpu->fFlushAsidBeforeUse = false;
2145 }
2146 else
2147 {
2148 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2149 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2150 }
2151 }
2152 }
2153
2154 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2155 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2156 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2157 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pCpu->idCpu,
2158 pCpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2159 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2160 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2161
2162 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2163 AssertRC(rc);
2164}
2165
2166
2167/**
2168 * Flushes the guest TLB entry based on CPU capabilities.
2169 *
2170 * @param pVCpu The cross context virtual CPU structure.
2171 * @param pCpu Pointer to the global HM CPU struct.
2172 */
2173DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
2174{
2175#ifdef HMVMX_ALWAYS_FLUSH_TLB
2176 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2177#endif
2178 PVM pVM = pVCpu->CTX_SUFF(pVM);
2179 switch (pVM->hm.s.vmx.enmTlbFlushType)
2180 {
2181 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVCpu, pCpu); break;
2182 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pVCpu, pCpu); break;
2183 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pVCpu, pCpu); break;
2184 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pVCpu, pCpu); break;
2185 default:
2186 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2187 break;
2188 }
2189 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2190}
2191
2192
2193/**
2194 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2195 * TLB entries from the host TLB before VM-entry.
2196 *
2197 * @returns VBox status code.
2198 * @param pVM The cross context VM structure.
2199 */
2200static int hmR0VmxSetupTaggedTlb(PVM pVM)
2201{
2202 /*
2203 * Determine optimal flush type for Nested Paging.
2205     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
2205 * guest execution (see hmR3InitFinalizeR0()).
2206 */
2207 if (pVM->hm.s.fNestedPaging)
2208 {
2209 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2210 {
2211 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2212 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT;
2213 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2214 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS;
2215 else
2216 {
2217                /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
2218 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2219 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
2220 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2221 }
2222
2223 /* Make sure the write-back cacheable memory type for EPT is supported. */
2224 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)))
2225 {
2226 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2227 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
2228 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2229 }
2230
2231 /* EPT requires a page-walk length of 4. */
2232 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
2233 {
2234 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2235 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
2236 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2237 }
2238 }
2239 else
2240 {
2241 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2242 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2243 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
2244 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2245 }
2246 }
2247
2248 /*
2249 * Determine optimal flush type for VPID.
2250 */
2251 if (pVM->hm.s.vmx.fVpid)
2252 {
2253 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2254 {
2255 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2256 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT;
2257 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2258 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS;
2259 else
2260 {
2261                /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore the VPID capability. */
2262 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2263 LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
2264 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2265 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2266 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2267 pVM->hm.s.vmx.fVpid = false;
2268 }
2269 }
2270 else
2271 {
2272 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2273 Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n"));
2274 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2275 pVM->hm.s.vmx.fVpid = false;
2276 }
2277 }
2278
2279 /*
2280 * Setup the handler for flushing tagged-TLBs.
2281 */
2282 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2283 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID;
2284 else if (pVM->hm.s.fNestedPaging)
2285 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT;
2286 else if (pVM->hm.s.vmx.fVpid)
2287 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID;
2288 else
2289 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE;
2290 return VINF_SUCCESS;
2291}
2292
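/*
 * Summary (informational only): the handler selected above is dispatched once per
 * world switch by hmR0VmxFlushTaggedTlb() earlier in this file.
 *
 *   fNestedPaging  fVpid   enmTlbFlushType            Handler
 *   true           true    VMXTLBFLUSHTYPE_EPT_VPID   hmR0VmxFlushTaggedTlbBoth
 *   true           false   VMXTLBFLUSHTYPE_EPT        hmR0VmxFlushTaggedTlbEpt
 *   false          true    VMXTLBFLUSHTYPE_VPID       hmR0VmxFlushTaggedTlbVpid
 *   false          false   VMXTLBFLUSHTYPE_NONE       hmR0VmxFlushTaggedTlbNone
 */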
2293
2294/**
2295 * Sets up pin-based VM-execution controls in the VMCS.
2296 *
2297 * @returns VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure.
2299 *
2300 * @remarks We don't really care about optimizing vmwrites here as it's done only
2301 * once per VM and hence we don't care about VMCS-field cache comparisons.
2302 */
2303static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
2304{
2305 PVM pVM = pVCpu->CTX_SUFF(pVM);
2306 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
2307 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2308
2309 fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2310 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2311
2312 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
2313 fVal |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2314
2315 /* Enable the VMX preemption timer. */
2316 if (pVM->hm.s.vmx.fUsePreemptTimer)
2317 {
2318 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
2319 fVal |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
2320 }
2321
2322#if 0
2323 /* Enable posted-interrupt processing. */
2324 if (pVM->hm.s.fPostedIntrs)
2325 {
2326 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR);
2327 Assert(pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT);
2328 fVal |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;
2329 }
2330#endif
2331
2332 if ((fVal & fZap) != fVal)
2333 {
2334 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
2335 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
2336 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2337 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2338 }
2339
2340 /* Commit it to the VMCS and update our cache. */
2341 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
2342 AssertRCReturn(rc, rc);
2343 pVCpu->hm.s.vmx.u32PinCtls = fVal;
2344
2345 return VINF_SUCCESS;
2346}
2347
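/*
 * Illustrative sketch (not compiled): the constraint the VMX capability MSRs impose on
 * every control field set up by this and the following hmR0VmxSetup*Ctls() functions.
 * 'disallowed0' holds the bits the CPU forces to 1 and 'allowed1' the bits the CPU
 * permits to be 1; a requested value is valid only if it is a superset of the former
 * and a subset of the latter. The helper name is hypothetical.
 */
#if 0
static bool hmR0VmxSketchIsValidCtls(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fVal)
{
    return (fVal & fDisallowed0) == fDisallowed0    /* All must-be-one bits are set... */
        && (fVal & fAllowed1)    == fVal;           /* ...and no bit outside the allowed-one mask is set. */
}
#endif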
2348
2349/**
2350 * Sets up secondary processor-based VM-execution controls in the VMCS.
2351 *
2352 * @returns VBox status code.
2353 * @param pVCpu The cross context virtual CPU structure.
2354 *
2355 * @remarks We don't really care about optimizing vmwrites here as it's done only
2356 * once per VM and hence we don't care about VMCS-field cache comparisons.
2357 */
2358static int hmR0VmxSetupProcCtls2(PVMCPU pVCpu)
2359{
2360 PVM pVM = pVCpu->CTX_SUFF(pVM);
2361 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
2362 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2363
2364 /* WBINVD causes a VM-exit. */
2365 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
2366 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
2367
2368 /* Enable EPT (aka nested-paging). */
2369 if (pVM->hm.s.fNestedPaging)
2370 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
2371
2372 /*
2373 * Enable the INVPCID instruction if supported by the hardware and we expose
2374     * it to the guest. Without this, a guest executing INVPCID would cause a #UD.
2375 */
2376 if ( (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
2377 && pVM->cpum.ro.GuestFeatures.fInvpcid)
2378 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
2379
2380 /* Enable VPID. */
2381 if (pVM->hm.s.vmx.fVpid)
2382 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
2383
2384 /* Enable Unrestricted guest execution. */
2385 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2386 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;
2387
2388#if 0
2389 if (pVM->hm.s.fVirtApicRegs)
2390 {
2391 /* Enable APIC-register virtualization. */
2392 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
2393 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;
2394
2395 /* Enable virtual-interrupt delivery. */
2396 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
2397 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;
2398 }
2399#endif
2400
2401 /* Enable Virtual-APIC page accesses if supported by the CPU. This is where the TPR shadow resides. */
2402 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2403 * done dynamically. */
2404 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2405 {
2406 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
2407 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2408 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
2409 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
2410 AssertRCReturn(rc, rc);
2411 }
2412
2413 /* Enable RDTSCP. */
2414 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
2415 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
2416
2417 /* Enable Pause-Loop exiting. */
2418 if ( pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
2419 && pVM->hm.s.vmx.cPleGapTicks
2420 && pVM->hm.s.vmx.cPleWindowTicks)
2421 {
2422 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;
2423
2424 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
2425 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
2426 AssertRCReturn(rc, rc);
2427 }
2428
2429 if ((fVal & fZap) != fVal)
2430 {
2431 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
2432 pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
2433 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2434 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2435 }
2436
2437 /* Commit it to the VMCS and update our cache. */
2438 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
2439 AssertRCReturn(rc, rc);
2440 pVCpu->hm.s.vmx.u32ProcCtls2 = fVal;
2441
2442 return VINF_SUCCESS;
2443}
2444
2445
2446/**
2447 * Sets up processor-based VM-execution controls in the VMCS.
2448 *
2449 * @returns VBox status code.
2450 * @param pVCpu The cross context virtual CPU structure.
2451 *
2452 * @remarks We don't really care about optimizing vmwrites here as it's done only
2453 * once per VM and hence we don't care about VMCS-field cache comparisons.
2454 */
2455static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
2456{
2457 PVM pVM = pVCpu->CTX_SUFF(pVM);
2458 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
2459 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2460
2461 fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
2462 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2463 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2464 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2465 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2466 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2467 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2468
2469    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, so check that it isn't forced to be -always- set or -always- clear. */
2470 if ( !(pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
2471 || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
2472 {
2473 LogRelFunc(("Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
2474 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2475 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2476 }
2477
2478 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
2479 if (!pVM->hm.s.fNestedPaging)
2480 {
2481 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
2482 fVal |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
2483 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
2484 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
2485 }
2486
2487 /* Use TPR shadowing if supported by the CPU. */
2488 if ( PDMHasApic(pVM)
2489 && pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2490 {
2491 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2492 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2493 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
2494 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
2495 AssertRCReturn(rc, rc);
2496
2497 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
2498 /* CR8 writes cause a VM-exit based on TPR threshold. */
2499 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
2500 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
2501 }
2502 else
2503 {
2504 /*
2505 * Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is invalid on 32-bit Intel CPUs.
2506 * Set this control only for 64-bit guests.
2507 */
2508 if (pVM->hm.s.fAllow64BitGuests)
2509 {
2510 fVal |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
2511 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
2512 }
2513 }
2514
2515 /* Use MSR-bitmaps if supported by the CPU. */
2516 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
2517 {
2518 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
2519
2520 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2521 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2522 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
2523 AssertRCReturn(rc, rc);
2524
2525 /*
2526 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
2527 * automatically using dedicated fields in the VMCS.
2528 */
2529 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2530 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2531 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2532 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2533 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2534#if HC_ARCH_BITS == 64
2535 /*
2536 * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
2537 */
2538 if (pVM->hm.s.fAllow64BitGuests)
2539 {
2540 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2541 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2542 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2543 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2544 }
2545#endif
2546 /*
2547 * The IA32_PRED_CMD MSR is write-only and has no state associated with it. We never need to intercept
2548         * access (writes need to be executed without exiting, reads will #GP-fault anyway).
2549 */
2550 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2551 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_PRED_CMD, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
2552
2553        /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want to intercept reads/writes to it for now. */
2554 }
2555
2556 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2557 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2558 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
2559
2560 if ((fVal & fZap) != fVal)
2561 {
2562 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
2563 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
2564 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2565 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2566 }
2567
2568 /* Commit it to the VMCS and update our cache. */
2569 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
2570 AssertRCReturn(rc, rc);
2571 pVCpu->hm.s.vmx.u32ProcCtls = fVal;
2572
2573 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
2574 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
2575 return hmR0VmxSetupProcCtls2(pVCpu);
2576
2577 /* Sanity check, should not really happen. */
2578 if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
2579 {
2580 LogRelFunc(("Unrestricted Guest enabled when secondary processor-based VM-execution controls not available\n"));
2581 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
2582 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2583 }
2584
2585 /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
2586 return VINF_SUCCESS;
2587}
2588
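/*
 * Illustrative sketch (not compiled): how an MSR maps to bits in the 4 KB MSR bitmap
 * that hmR0VmxSetMsrPermission() manipulates. A set bit causes the corresponding
 * RDMSR/WRMSR to VM-exit; the passthru permissions granted above simply clear both
 * bits. The offsets follow the Intel spec layout ("MSR-Bitmap Address"); MSRs outside
 * both ranges always cause VM-exits. The helper name is hypothetical.
 */
#if 0
static void hmR0VmxSketchMsrBitmapOffsets(uint32_t uMsr, uint32_t *poffRead, uint32_t *poffWrite, uint32_t *piBit)
{
    bool const     fHigh = (uMsr >= UINT32_C(0xc0000000) && uMsr <= UINT32_C(0xc0001fff)); /* High MSR range? */
    uint32_t const iBit  = uMsr & UINT32_C(0x1fff);     /* Bit index within the selected 1 KB (8192-bit) bitmap. */
    *poffRead  = fHigh ? 1024 : 0;                      /* Read bitmaps:  bytes 0..1023 (low), 1024..2047 (high). */
    *poffWrite = fHigh ? 3072 : 2048;                   /* Write bitmaps: bytes 2048..3071 (low), 3072..4095 (high). */
    *piBit     = iBit;
}
#endif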
2589
2590/**
2591 * Sets up miscellaneous (everything other than Pin & Processor-based
2592 * VM-execution) control fields in the VMCS.
2593 *
2594 * @returns VBox status code.
2595 * @param pVCpu The cross context virtual CPU structure.
2596 */
2597static int hmR0VmxSetupMiscCtls(PVMCPU pVCpu)
2598{
2599 AssertPtr(pVCpu);
2600
2601 int rc = VERR_GENERAL_FAILURE;
2602
2603 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2604#if 0
2605 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/
2606 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
2607 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
2608
2609 /*
2610 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
2611 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
2612 * We thus use the exception bitmap to control it rather than use both.
2613 */
2614 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
2615 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
2616
2617 /* All IO & IOIO instructions cause VM-exits. */
2618 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
2619 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
2620
2621 /* Initialize the MSR-bitmap area. */
2622 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
2623 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
2624 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
2625 AssertRCReturn(rc, rc);
2626#endif
2627
2628 /* Setup MSR auto-load/store area. */
2629 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
2630 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
2631 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2632 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
2633 AssertRCReturn(rc, rc);
2634
2635 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
2636 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
2637 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
2638 AssertRCReturn(rc, rc);
2639
2640 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
2641 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
2642 AssertRCReturn(rc, rc);
2643
2644 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
2645#if 0
2646 /* Setup debug controls */
2647 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);
2648 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
2649 AssertRCReturn(rc, rc);
2650#endif
2651
2652 return rc;
2653}
2654
2655
2656/**
2657 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2658 *
2659 * We shall setup those exception intercepts that don't change during the
2660 * lifetime of the VM here. The rest are done dynamically while loading the
2661 * guest state.
2662 *
2663 * @returns VBox status code.
2664 * @param pVCpu The cross context virtual CPU structure.
2665 */
2666static int hmR0VmxInitXcptBitmap(PVMCPU pVCpu)
2667{
2668 AssertPtr(pVCpu);
2669
2670 uint32_t uXcptBitmap;
2671
2672 /* Must always intercept #AC to prevent the guest from hanging the CPU. */
2673 uXcptBitmap = RT_BIT_32(X86_XCPT_AC);
2674
2675 /* Because we need to maintain the DR6 state even when intercepting DRx reads
2676       and writes, and because recursive #DBs can cause the CPU to hang, we must always
2677 intercept #DB. */
2678 uXcptBitmap |= RT_BIT_32(X86_XCPT_DB);
2679
2680 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2681 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
2682 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
2683
2684 /* Commit it to the VMCS. */
2685 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2686 AssertRCReturn(rc, rc);
2687
2688 /* Update our cache of the exception bitmap. */
2689 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
2690 return VINF_SUCCESS;
2691}
2692
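/*
 * Illustrative sketch (not compiled): bit n of the 32-bit exception bitmap corresponds
 * to exception vector n, and a set bit makes that exception cause a VM-exit (for #PF
 * the page-fault error-code mask/match fields additionally filter the decision). This
 * just restates what the static setup above establishes for #AC, #DB and, without
 * nested paging, #PF. The helper name is hypothetical.
 */
#if 0
static bool hmR0VmxSketchXcptCausesExit(uint32_t uXcptBitmap, uint8_t uVector /* < 32 */)
{
    return RT_BOOL(uXcptBitmap & RT_BIT_32(uVector));
}
#endif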
2693
2694/**
2695 * Does per-VM VT-x initialization.
2696 *
2697 * @returns VBox status code.
2698 * @param pVM The cross context VM structure.
2699 */
2700VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2701{
2702 LogFlowFunc(("pVM=%p\n", pVM));
2703
2704 int rc = hmR0VmxStructsAlloc(pVM);
2705 if (RT_FAILURE(rc))
2706 {
2707 LogRelFunc(("hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2708 return rc;
2709 }
2710
2711 return VINF_SUCCESS;
2712}
2713
2714
2715/**
2716 * Does per-VM VT-x termination.
2717 *
2718 * @returns VBox status code.
2719 * @param pVM The cross context VM structure.
2720 */
2721VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2722{
2723 LogFlowFunc(("pVM=%p\n", pVM));
2724
2725#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2726 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2727 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2728#endif
2729 hmR0VmxStructsFree(pVM);
2730 return VINF_SUCCESS;
2731}
2732
2733
2734/**
2735 * Sets up the VM for execution under VT-x.
2736 * This function is only called once per-VM during initialization.
2737 *
2738 * @returns VBox status code.
2739 * @param pVM The cross context VM structure.
2740 */
2741VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2742{
2743 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2744 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2745
2746 LogFlowFunc(("pVM=%p\n", pVM));
2747
2748 /*
2749 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be
2750 * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without
2751 * pRealModeTSS, see hmR3InitFinalizeR0Intel().
2752 */
2753 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2754 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2755 || !pVM->hm.s.vmx.pRealModeTSS))
2756 {
2757 LogRelFunc(("Invalid real-on-v86 state.\n"));
2758 return VERR_INTERNAL_ERROR;
2759 }
2760
2761 /* Initialize these always, see hmR3InitFinalizeR0().*/
2762 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NONE;
2763 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
2764
2765 /* Setup the tagged-TLB flush handlers. */
2766 int rc = hmR0VmxSetupTaggedTlb(pVM);
2767 if (RT_FAILURE(rc))
2768 {
2769 LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2770 return rc;
2771 }
2772
2773 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
2774 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
2775#if HC_ARCH_BITS == 64
2776 if ( (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
2777 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
2778 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
2779 {
2780 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
2781 }
2782#endif
2783
2784 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
2785 RTCCUINTREG uHostCR4 = ASMGetCR4();
2786 if (RT_UNLIKELY(!(uHostCR4 & X86_CR4_VMXE)))
2787 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
2788
2789 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2790 {
2791 PVMCPU pVCpu = &pVM->aCpus[i];
2792 AssertPtr(pVCpu);
2793 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2794
2795 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2796 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2797
2798 /* Set revision dword at the beginning of the VMCS structure. */
2799 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.Msrs.u64BasicInfo);
2800
2801 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2802 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2803 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc\n", rc),
2804 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2805
2806 /* Load this VMCS as the current VMCS. */
2807 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2808 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc\n", rc),
2809 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2810
2811 rc = hmR0VmxSetupPinCtls(pVCpu);
2812 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc\n", rc),
2813 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2814
2815 rc = hmR0VmxSetupProcCtls(pVCpu);
2816 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc\n", rc),
2817 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2818
2819 rc = hmR0VmxSetupMiscCtls(pVCpu);
2820 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc\n", rc),
2821 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2822
2823 rc = hmR0VmxInitXcptBitmap(pVCpu);
2824 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc\n", rc),
2825 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2826
2827#if HC_ARCH_BITS == 32
2828 rc = hmR0VmxInitVmcsReadCache(pVCpu);
2829 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc\n", rc),
2830 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2831#endif
2832
2833 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2834 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2835 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc\n", rc),
2836 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc);
2837
2838 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2839
2840 hmR0VmxUpdateErrorRecord(pVCpu, rc);
2841 }
2842
2843 return VINF_SUCCESS;
2844}
2845
2846
2847/**
2848 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2849 * the VMCS.
2850 *
2851 * @returns VBox status code.
2852 */
2853static int hmR0VmxExportHostControlRegs(void)
2854{
2855 RTCCUINTREG uReg = ASMGetCR0();
2856 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2857 AssertRCReturn(rc, rc);
2858
2859 uReg = ASMGetCR3();
2860 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2861 AssertRCReturn(rc, rc);
2862
2863 uReg = ASMGetCR4();
2864 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2865 AssertRCReturn(rc, rc);
2866 return rc;
2867}
2868
2869
2870/**
2871 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2872 * the host-state area in the VMCS.
2873 *
2874 * @returns VBox status code.
2875 * @param pVCpu The cross context virtual CPU structure.
2876 */
2877static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu)
2878{
2879#if HC_ARCH_BITS == 64
2880/**
2881 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
2882 * requirements. See hmR0VmxExportHostSegmentRegs().
2883 */
2884# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
2885 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
2886 { \
2887 bool fValidSelector = true; \
2888 if ((selValue) & X86_SEL_LDT) \
2889 { \
2890 uint32_t uAttr = ASMGetSegAttr((selValue)); \
2891 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
2892 } \
2893 if (fValidSelector) \
2894 { \
2895 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
2896 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
2897 } \
2898 (selValue) = 0; \
2899 }
2900
2901 /*
2902 * If we've executed guest code using VT-x, the host-state bits will be messed up. We
2903 * should -not- save the messed up state without restoring the original host-state,
2904 * see @bugref{7240}.
2905 *
2906     * This apparently can happen (most likely the FPU changes); deal with it rather than
2907     * asserting. This was observed while booting a Solaris 10u10 32-bit guest.
2908 */
2909 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
2910 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
2911 {
2912 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags,
2913 pVCpu->idCpu));
2914 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
2915 }
2916 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2917#else
2918 RT_NOREF(pVCpu);
2919#endif
2920
2921 /*
2922 * Host DS, ES, FS and GS segment registers.
2923 */
2924#if HC_ARCH_BITS == 64
2925 RTSEL uSelDS = ASMGetDS();
2926 RTSEL uSelES = ASMGetES();
2927 RTSEL uSelFS = ASMGetFS();
2928 RTSEL uSelGS = ASMGetGS();
2929#else
2930 RTSEL uSelDS = 0;
2931 RTSEL uSelES = 0;
2932 RTSEL uSelFS = 0;
2933 RTSEL uSelGS = 0;
2934#endif
2935
2936 /*
2937 * Host CS and SS segment registers.
2938 */
2939 RTSEL uSelCS = ASMGetCS();
2940 RTSEL uSelSS = ASMGetSS();
2941
2942 /*
2943 * Host TR segment register.
2944 */
2945 RTSEL uSelTR = ASMGetTR();
2946
2947#if HC_ARCH_BITS == 64
2948 /*
2949 * Determine if the host segment registers are suitable for VT-x; for any that aren't, use
2950 * zero selectors so VM-entry succeeds, and restore the originals before we get preempted.
2951 *
2952 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2953 */
2954 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
2955 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
2956 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
2957 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
2958# undef VMXLOCAL_ADJUST_HOST_SEG
2959#endif
2960
2961 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2962 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2963 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2964 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2965 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2966 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2967 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2968 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2969 Assert(uSelCS);
2970 Assert(uSelTR);
2971
2972 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2973#if 0
2974 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2975 Assert(uSelSS != 0);
2976#endif
2977
2978 /* Write these host selector fields into the host-state area in the VMCS. */
2979 int rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
2980 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
2981#if HC_ARCH_BITS == 64
2982 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
2983 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
2984 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
2985 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
2986#else
2987 NOREF(uSelDS);
2988 NOREF(uSelES);
2989 NOREF(uSelFS);
2990 NOREF(uSelGS);
2991#endif
2992 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
2993 AssertRCReturn(rc, rc);
2994
2995 /*
2996 * Host GDTR and IDTR.
2997 */
2998 RTGDTR Gdtr;
2999 RTIDTR Idtr;
3000 RT_ZERO(Gdtr);
3001 RT_ZERO(Idtr);
3002 ASMGetGDTR(&Gdtr);
3003 ASMGetIDTR(&Idtr);
3004 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
3005 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
3006 AssertRCReturn(rc, rc);
3007
3008#if HC_ARCH_BITS == 64
3009 /*
3010 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps
3011 * them to the maximum limit (0xffff) on every VM-exit.
3012 */
3013 if (Gdtr.cbGdt != 0xffff)
3014 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
3015
3016 /*
3017 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
3018 * Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit
3019 * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
3020 * However, several hosts either insist on 0xfff being the limit (Windows Patch Guard) or
3021 * use the limit for other purposes (darwin puts the CPU ID in there but botches sidt
3022 * alignment in at least one consumer). So, we're only allowing the IDTR.LIMIT to be left
3023 * at 0xffff on hosts where we are sure it won't cause trouble.
3024 */
3025# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
3026 if (Idtr.cbIdt < 0x0fff)
3027# else
3028 if (Idtr.cbIdt != 0xffff)
3029# endif
3030 {
3031 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
3032 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
3033 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
3034 }
3035#endif
3036
3037 /*
3038 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI
3039 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and
3040 * RPL should be too in most cases.
3041 */
3042 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
3043 ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE);
3044
3045 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
3046#if HC_ARCH_BITS == 64
3047 uintptr_t uTRBase = X86DESC64_BASE(pDesc);
3048
3049 /*
3050 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on
3051 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual
3052 * restoration if the host has something else. Task switching is not supported in 64-bit
3053 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the
3054 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
3055 *
3056 * [1] See Intel spec. 3.5 "System Descriptor Types".
3057 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
3058 */
3059 PVM pVM = pVCpu->CTX_SUFF(pVM);
3060 Assert(pDesc->System.u4Type == 11);
3061 if ( pDesc->System.u16LimitLow != 0x67
3062 || pDesc->System.u4LimitHigh)
3063 {
3064 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
3065 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
3066 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
3067 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
3068 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
3069 }
3070
3071 /*
3072 * Store the GDTR as we need it when restoring the GDT and while restoring the TR.
3073 */
3074 if (pVCpu->hm.s.vmx.fRestoreHostFlags & (VMX_RESTORE_HOST_GDTR | VMX_RESTORE_HOST_SEL_TR))
3075 {
3076 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
3077 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
3078 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_NEED_WRITABLE)
3079 {
3080 /* The GDT is read-only but the writable GDT is available. */
3081 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE;
3082 pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = Gdtr.cbGdt;
3083 rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr);
3084 AssertRCReturn(rc, rc);
3085 }
3086 }
3087#else
3088 uintptr_t uTRBase = X86DESC_BASE(pDesc);
3089#endif
3090 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
3091 AssertRCReturn(rc, rc);
3092
3093 /*
3094 * Host FS base and GS base.
3095 */
3096#if HC_ARCH_BITS == 64
3097 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
3098 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
3099 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
3100 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
3101 AssertRCReturn(rc, rc);
3102
3103 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
3104 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
3105 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
3106 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
3107 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
3108#endif
3109 return VINF_SUCCESS;
3110}
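
/*
 * Illustrative sketch, not part of the VirtualBox source: how the TR selector check in
 * hmR0VmxExportHostSegmentRegs() above relates a selector to its GDT descriptor. The helper
 * name and the two mask constants are hypothetical; the values are the standard x86 selector
 * layout (bits 1:0 = RPL, bit 2 = TI), which is why masking them off "scales by 8".
 */
#include <stdint.h>
#include <assert.h>

#define SEL_RPL_TI_MASK  0x0007u    /* RPL (bits 1:0) and TI (bit 2) */
#define SEL_INDEX_MASK   0xfff8u    /* selector index * 8 == descriptor byte offset */

static uint32_t SelToGdtByteOffset(uint16_t uSel, uint16_t cbGdtLimit)
{
    /* Mirrors the AssertMsgReturn() above: even with RPL/TI forced to 1 the
       selector must not point past the GDT limit. */
    assert((uint16_t)(uSel | SEL_RPL_TI_MASK) <= cbGdtLimit);
    return (uint32_t)(uSel & SEL_INDEX_MASK);   /* e.g. selector 0x43 -> descriptor at offset 0x40 */
}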
3111
3112
3113/**
3114 * Exports certain host MSRs in the VM-exit MSR-load area and some in the
3115 * host-state area of the VMCS.
3116 *
3117 * These MSRs will be automatically restored on the host after every successful
3118 * VM-exit.
3119 *
3120 * @returns VBox status code.
3121 * @param pVCpu The cross context virtual CPU structure.
3122 *
3123 * @remarks No-long-jump zone!!!
3124 */
3125static int hmR0VmxExportHostMsrs(PVMCPU pVCpu)
3126{
3127 AssertPtr(pVCpu);
3128 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
3129
3130 /*
3131 * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
3132 * rather than swapping them on every VM-entry.
3133 */
3134 hmR0VmxLazySaveHostMsrs(pVCpu);
3135
3136 /*
3137 * Host Sysenter MSRs.
3138 */
3139 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
3140#if HC_ARCH_BITS == 32
3141 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
3142 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
3143#else
3144 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
3145 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
3146#endif
3147 AssertRCReturn(rc, rc);
3148
3149 /*
3150 * Host EFER MSR.
3151 *
3152 * If the CPU supports the newer VMCS controls for managing EFER, use it. Otherwise it's
3153 * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs().
3154 */
3155 PVM pVM = pVCpu->CTX_SUFF(pVM);
3156 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
3157 {
3158 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
3159 AssertRCReturn(rc, rc);
3160 }
3161
3162 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */
3163
3164 return VINF_SUCCESS;
3165}
3166
3167
3168/**
3169 * Figures out if we need to swap the EFER MSR which is particularly expensive.
3170 *
3171 * We check all relevant bits. For now, that's everything besides LMA/LME, as
3172 * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and
3173 * hmR0VmxExportGuestEntryCtls().
3174 *
3175 * @returns true if we need to load guest EFER, false otherwise.
3176 * @param pVCpu The cross context virtual CPU structure.
3177 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3178 * out-of-sync. Make sure to update the required fields
3179 * before using them.
3180 *
3181 * @remarks Requires EFER, CR4.
3182 * @remarks No-long-jump zone!!!
3183 */
3184static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3185{
3186#ifdef HMVMX_ALWAYS_SWAP_EFER
3187 return true;
3188#endif
3189
3190#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
3191 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
3192 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3193 return false;
3194#endif
3195
3196 PVM pVM = pVCpu->CTX_SUFF(pVM);
3197 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64HostEfer;
3198 uint64_t const u64GuestEfer = pMixedCtx->msrEFER;
3199
3200 /*
3201 * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the
3202 * guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
3203 */
3204 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
3205 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
3206 {
3207 return true;
3208 }
3209
3210 /*
3211 * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
3212 * affects guest paging. 64-bit paging implies CR4.PAE as well.
3213 * See Intel spec. 4.5 "IA-32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
3214 */
3215 if ( (pMixedCtx->cr4 & X86_CR4_PAE)
3216 && (pMixedCtx->cr0 & X86_CR0_PG)
3217 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
3218 {
3219 /* Assert that host is PAE capable. */
3220 Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
3221 return true;
3222 }
3223
3224 return false;
3225}
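
/*
 * Illustrative sketch, not part of the VirtualBox source: the core of the EFER-swap decision
 * made by hmR0VmxShouldSwapEferMsr() above, with the long-mode and PAE/PG qualifiers collapsed
 * into boolean parameters. The helper name is hypothetical; the bit positions are the
 * architectural EFER layout (SCE = bit 0, NXE = bit 11).
 */
#include <stdint.h>
#include <stdbool.h>

#define EFER_SCE  (UINT64_C(1) << 0)    /* SYSCALL enable */
#define EFER_NXE  (UINT64_C(1) << 11)   /* no-execute enable */

static bool ShouldSwapEferSketch(uint64_t uHostEfer, uint64_t uGuestEfer,
                                 bool fGuestLongMode, bool fGuestPaePaging)
{
    if (fGuestLongMode && ((uHostEfer ^ uGuestEfer) & EFER_SCE))
        return true;    /* differing SCE would break the guest's SYSCALL behaviour */
    if (fGuestPaePaging && ((uHostEfer ^ uGuestEfer) & EFER_NXE))
        return true;    /* differing NXE affects the guest's page-table walks */
    return false;
}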
3226
3227
3228/**
3229 * Exports the guest state with appropriate VM-entry controls in the VMCS.
3230 *
3231 * These controls can affect things done on VM-exit; e.g. "load debug controls",
3232 * see Intel spec. 24.8.1 "VM-entry controls".
3233 *
3234 * @returns VBox status code.
3235 * @param pVCpu The cross context virtual CPU structure.
3236 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3237 * out-of-sync. Make sure to update the required fields
3238 * before using them.
3239 *
3240 * @remarks Requires EFER.
3241 * @remarks No-long-jump zone!!!
3242 */
3243static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3244{
3245 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
3246 {
3247 PVM pVM = pVCpu->CTX_SUFF(pVM);
3248 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
3249 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3250
3251 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
3252 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
3253
3254 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
3255 if (CPUMIsGuestInLongModeEx(pMixedCtx))
3256 {
3257 fVal |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
3258 Log4Func(("VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
3259 }
3260 else
3261 Assert(!(fVal & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
3262
3263 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
3264 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3265 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3266 {
3267 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
3268 Log4Func(("VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n"));
3269 }
3270
3271 /*
3272 * The following should -not- be set (since we're not in SMM mode):
3273 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
3274 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
3275 */
3276
3277 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
3278 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
3279
3280 if ((fVal & fZap) != fVal)
3281 {
3282 Log4Func(("Invalid VM-entry controls combo! Cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
3283 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, fVal, fZap));
3284 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
3285 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3286 }
3287
3288 /* Commit it to the VMCS and update our cache. */
3289 if (pVCpu->hm.s.vmx.u32EntryCtls != fVal)
3290 {
3291 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
3292 AssertRCReturn(rc, rc);
3293 pVCpu->hm.s.vmx.u32EntryCtls = fVal;
3294 }
3295
3296 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS);
3297 }
3298 return VINF_SUCCESS;
3299}
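
/*
 * Illustrative sketch, not part of the VirtualBox source: the fVal/fZap pattern used by
 * hmR0VmxExportGuestEntryCtls() above (and by the exit-controls function below). The
 * capability MSR yields a must-be-one mask (disallowed0) and a may-be-one mask (allowed1);
 * a requested control value is legal only if it satisfies both. Names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static bool IsVmxCtlValueLegal(uint32_t fMustBeOne, uint32_t fMayBeOne, uint32_t fVal)
{
    if ((fVal & fMustBeOne) != fMustBeOne)
        return false;                       /* a required bit was cleared */
    if ((fVal & fMayBeOne) != fVal)
        return false;                       /* same test as "(fVal & fZap) != fVal" above */
    return true;
}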
3300
3301
3302/**
3303 * Exports the guest state with appropriate VM-exit controls in the VMCS.
3304 *
3305 * @returns VBox status code.
3306 * @param pVCpu The cross context virtual CPU structure.
3307 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3308 * out-of-sync. Make sure to update the required fields
3309 * before using them.
3310 *
3311 * @remarks Requires EFER.
3312 */
3313static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3314{
3315 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
3316 {
3317 PVM pVM = pVCpu->CTX_SUFF(pVM);
3318 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
3319 uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3320
3321 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
3322 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
3323
3324 /*
3325 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
3326 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in
3327 * hmR0VmxExportHostMsrs().
3328 */
3329#if HC_ARCH_BITS == 64
3330 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3331 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
3332#else
3333 Assert( pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64
3334 || pVCpu->hm.s.vmx.pfnStartVM == VMXR0StartVM32);
3335 /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */
3336 if (pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64)
3337 {
3338 /* The switcher returns to long mode, EFER is managed by the switcher. */
3339 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
3340 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
3341 }
3342 else
3343 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
3344#endif
3345
3346 /* If the newer VMCS fields for managing EFER exists, use it. */
3347 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
3348 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
3349 {
3350 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
3351 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
3352 Log4Func(("VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR and VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n"));
3353 }
3354
3355 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
3356 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
3357
3358 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
3359 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
3360 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
3361
3362 /* Enable saving of the VMX preemption timer value on VM-exit. */
3363 if ( pVM->hm.s.vmx.fUsePreemptTimer
3364 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
3365 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
3366
3367 if ((fVal & fZap) != fVal)
3368 {
3369 LogRelFunc(("Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
3370 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
3371 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
3372 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3373 }
3374
3375 /* Commit it to the VMCS and update our cache. */
3376 if (pVCpu->hm.s.vmx.u32ExitCtls != fVal)
3377 {
3378 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
3379 AssertRCReturn(rc, rc);
3380 pVCpu->hm.s.vmx.u32ExitCtls = fVal;
3381 }
3382
3383 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS);
3384 }
3385 return VINF_SUCCESS;
3386}
3387
3388
3389/**
3390 * Sets the TPR threshold in the VMCS.
3391 *
3392 * @returns VBox status code.
3393 * @param pVCpu The cross context virtual CPU structure.
3394 * @param u32TprThreshold The TPR threshold (task-priority class only).
3395 */
3396DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, uint32_t u32TprThreshold)
3397{
3398 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
3399 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW); RT_NOREF_PV(pVCpu);
3400 return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
3401}
3402
3403
3404/**
3405 * Exports the guest APIC TPR state into the VMCS.
3406 *
3407 * @returns VBox status code.
3408 * @param pVCpu The cross context virtual CPU structure.
3409 *
3410 * @remarks No-long-jump zone!!!
3411 */
3412static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu)
3413{
3414 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
3415 {
3416 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
3417
3418 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
3419 && APICIsEnabled(pVCpu))
3420 {
3421 /*
3422 * Setup TPR shadowing.
3423 */
3424 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
3425 {
3426 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
3427
3428 bool fPendingIntr = false;
3429 uint8_t u8Tpr = 0;
3430 uint8_t u8PendingIntr = 0;
3431 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
3432 AssertRCReturn(rc, rc);
3433
3434 /*
3435 * If there are interrupts pending but masked by the TPR, instruct VT-x to
3436 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
3437 * priority of the pending interrupt so we can deliver the interrupt. If there
3438 * are no interrupts pending, set threshold to 0 to not cause any
3439 * TPR-below-threshold VM-exits.
3440 */
3441 pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
3442 uint32_t u32TprThreshold = 0;
3443 if (fPendingIntr)
3444 {
3445 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR (which is the Task-Priority Class). */
3446 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
3447 const uint8_t u8TprPriority = u8Tpr >> 4;
3448 if (u8PendingPriority <= u8TprPriority)
3449 u32TprThreshold = u8PendingPriority;
3450 }
3451
3452 rc = hmR0VmxApicSetTprThreshold(pVCpu, u32TprThreshold);
3453 AssertRCReturn(rc, rc);
3454 }
3455 }
3456 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
3457 }
3458 return VINF_SUCCESS;
3459}
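
/*
 * Illustrative sketch, not part of the VirtualBox source: the TPR-threshold computation used
 * by hmR0VmxExportGuestApicTpr() above. Bits 7:4 of the TPR and of an interrupt vector form
 * the task-priority class; only that 4-bit class goes into the VMCS threshold field. The
 * helper name is hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

static uint32_t ComputeTprThreshold(uint8_t u8Tpr, uint8_t u8PendingIntr, bool fPendingIntr)
{
    uint32_t u32Threshold = 0;
    if (fPendingIntr)
    {
        uint8_t const uPendingClass = u8PendingIntr >> 4;   /* e.g. vector 0x51 -> class 5 */
        uint8_t const uTprClass     = u8Tpr >> 4;           /* e.g. TPR 0x60    -> class 6 */
        if (uPendingClass <= uTprClass)                     /* interrupt currently masked by the TPR */
            u32Threshold = uPendingClass;                   /* VM-exit once the guest drops below it */
    }
    return u32Threshold;    /* bits 31:4 stay zero, as hmR0VmxApicSetTprThreshold() asserts */
}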
3460
3461
3462/**
3463 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
3464 *
3465 * @returns Guest's interruptibility-state.
3466 * @param pVCpu The cross context virtual CPU structure.
3467 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3468 * out-of-sync. Make sure to update the required fields
3469 * before using them.
3470 *
3471 * @remarks No-long-jump zone!!!
3472 */
3473static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3474{
3475 /*
3476 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
3477 */
3478 uint32_t fIntrState = 0;
3479 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3480 {
3481 /* If inhibition is active, RIP & RFLAGS should've been accessed
3482 (i.e. read previously from the VMCS or from ring-3). */
3483#ifdef VBOX_STRICT
3484 uint64_t const fExtrn = ASMAtomicUoReadU64(&pMixedCtx->fExtrn);
3485 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
3486#endif
3487 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
3488 {
3489 if (pMixedCtx->eflags.Bits.u1IF)
3490 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
3491 else
3492 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
3493 }
3494 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3495 {
3496 /*
3497 * We can clear the inhibit force flag as even if we go back to the recompiler
3498 * without executing guest code in VT-x, the flag's condition to be cleared is
3499 * met and thus the cleared state is correct.
3500 */
3501 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3502 }
3503 }
3504
3505 /*
3506 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
3507 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
3508 * setting this would block host-NMIs and IRET will not clear the blocking.
3509 *
3510 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
3511 */
3512 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
3513 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
3514 {
3515 fIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;
3516 }
3517
3518 return fIntrState;
3519}
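
/*
 * Illustrative sketch, not part of the VirtualBox source: the architectural layout of the
 * interruptibility-state value returned by hmR0VmxGetGuestIntrState() above (Intel SDM,
 * guest non-register state). The macro and helper names are hypothetical; the bit positions
 * are architectural.
 */
#include <stdint.h>
#include <stdbool.h>

#define INTR_STATE_BLOCK_STI    (1u << 0)   /* blocking by STI */
#define INTR_STATE_BLOCK_MOVSS  (1u << 1)   /* blocking by MOV SS / POP SS */
#define INTR_STATE_BLOCK_SMI    (1u << 2)   /* blocking by SMI */
#define INTR_STATE_BLOCK_NMI    (1u << 3)   /* blocking by NMI */

static bool IsIntrStateSane(uint32_t fIntrState)
{
    if (fIntrState & ~0xfu)     /* mirrors the "bits 31:4 MBZ" assertion below */
        return false;
    /* STI and MOV SS blocking cannot both be set, as hmR0VmxExportGuestIntrState() asserts. */
    uint32_t const fBoth = INTR_STATE_BLOCK_STI | INTR_STATE_BLOCK_MOVSS;
    return (fIntrState & fBoth) != fBoth;
}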
3520
3521
3522/**
3523 * Exports the guest's interruptibility-state into the guest-state area in the
3524 * VMCS.
3525 *
3526 * @returns VBox status code.
3527 * @param pVCpu The cross context virtual CPU structure.
3528 * @param fIntrState The interruptibility-state to set.
3529 */
3530static int hmR0VmxExportGuestIntrState(PVMCPU pVCpu, uint32_t fIntrState)
3531{
3532 NOREF(pVCpu);
3533 AssertMsg(!(fIntrState & 0xfffffff0), ("%#x\n", fIntrState)); /* Bits 31:4 MBZ. */
3534 Assert((fIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
3535 return VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, fIntrState);
3536}
3537
3538
3539/**
3540 * Exports the exception intercepts required for guest execution in the VMCS.
3541 *
3542 * @returns VBox status code.
3543 * @param pVCpu The cross context virtual CPU structure.
3544 *
3545 * @remarks No-long-jump zone!!!
3546 */
3547static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu)
3548{
3549 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS)
3550 {
3551 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
3552
3553 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportSharedCR0(). */
3554 if (pVCpu->hm.s.fGIMTrapXcptUD)
3555 uXcptBitmap |= RT_BIT(X86_XCPT_UD);
3556#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3557 else
3558 uXcptBitmap &= ~RT_BIT(X86_XCPT_UD);
3559#endif
3560
3561 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_AC));
3562 Assert(uXcptBitmap & RT_BIT_32(X86_XCPT_DB));
3563
3564 if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap)
3565 {
3566 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3567 AssertRCReturn(rc, rc);
3568 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
3569 }
3570
3571 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
3572 Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64\n", uXcptBitmap));
3573 }
3574 return VINF_SUCCESS;
3575}
3576
3577
3578/**
3579 * Exports the guest's RIP into the guest-state area in the VMCS.
3580 *
3581 * @returns VBox status code.
3582 * @param pVCpu The cross context virtual CPU structure.
3583 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3584 * out-of-sync. Make sure to update the required fields
3585 * before using them.
3586 *
3587 * @remarks No-long-jump zone!!!
3588 */
3589static int hmR0VmxExportGuestRip(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3590{
3591 int rc = VINF_SUCCESS;
3592 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
3593 {
3594 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
3595
3596 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
3597 AssertRCReturn(rc, rc);
3598
3599 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
3600 Log4Func(("RIP=%#RX64\n", pMixedCtx->rip));
3601 }
3602 return rc;
3603}
3604
3605
3606/**
3607 * Exports the guest's RSP into the guest-state area in the VMCS.
3608 *
3609 * @returns VBox status code.
3610 * @param pVCpu The cross context virtual CPU structure.
3611 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3612 * out-of-sync. Make sure to update the required fields
3613 * before using them.
3614 *
3615 * @remarks No-long-jump zone!!!
3616 */
3617static int hmR0VmxExportGuestRsp(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3618{
3619 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
3620 {
3621 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
3622
3623 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
3624 AssertRCReturn(rc, rc);
3625
3626 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
3627 }
3628 return VINF_SUCCESS;
3629}
3630
3631
3632/**
3633 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
3634 *
3635 * @returns VBox status code.
3636 * @param pVCpu The cross context virtual CPU structure.
3637 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3638 * out-of-sync. Make sure to update the required fields
3639 * before using them.
3640 *
3641 * @remarks No-long-jump zone!!!
3642 */
3643static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3644{
3645 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
3646 {
3647 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
3648
3649 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
3650 Let us assert it as such and use 32-bit VMWRITE. */
3651 Assert(!RT_HI_U32(pMixedCtx->rflags.u64));
3652 X86EFLAGS fEFlags = pMixedCtx->eflags;
3653 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
3654 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
3655
3656 /*
3657 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
3658 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
3659 * can run the real-mode guest code under Virtual 8086 mode.
3660 */
3661 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3662 {
3663 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3664 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3665 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
3666 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
3667 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
3668 }
3669
3670 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
3671 AssertRCReturn(rc, rc);
3672
3673 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
3674 Log4Func(("EFlags=%#RX32\n", fEFlags.u32));
3675 }
3676 return VINF_SUCCESS;
3677}
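
/*
 * Illustrative sketch, not part of the VirtualBox source: the RFLAGS adjustment performed by
 * hmR0VmxExportGuestRflags() above for the real-mode-in-virtual-8086 hack. The helper name is
 * hypothetical; bit 17 (VM) and bits 13:12 (IOPL) are the architectural EFLAGS positions.
 */
#include <stdint.h>

static uint32_t MakeV86Eflags(uint32_t uRealModeEflags)
{
    uRealModeEflags |= (1u << 17);      /* set the Virtual 8086 mode flag (VM) */
    uRealModeEflags &= ~(3u << 12);     /* IOPL = 0 so privileged instructions fault and can be intercepted */
    return uRealModeEflags;
}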
3678
3679
3680/**
3681 * Exports the guest CR0 control register into the guest-state area in the VMCS.
3682 *
3683 * The guest FPU state is always pre-loaded hence we don't need to bother about
3684 * sharing FPU related CR0 bits between the guest and host.
3685 *
3686 * @returns VBox status code.
3687 * @param pVCpu The cross context virtual CPU structure.
3688 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3689 * out-of-sync. Make sure to update the required fields
3690 * before using them.
3691 *
3692 * @remarks No-long-jump zone!!!
3693 */
3694static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3695{
3696 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
3697 {
3698 PVM pVM = pVCpu->CTX_SUFF(pVM);
3699 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3700 Assert(!RT_HI_U32(pMixedCtx->cr0));
3701
3702 uint32_t const u32ShadowCr0 = pMixedCtx->cr0;
3703 uint32_t u32GuestCr0 = pMixedCtx->cr0;
3704
3705 /*
3706 * Setup VT-x's view of the guest CR0.
3707 * Minimize VM-exits due to CR3 changes when we have NestedPaging.
3708 */
3709 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
3710 if (pVM->hm.s.fNestedPaging)
3711 {
3712 if (CPUMIsGuestPagingEnabled(pVCpu))
3713 {
3714 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
3715 uProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3716 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3717 }
3718 else
3719 {
3720 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
3721 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3722 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3723 }
3724
3725 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3726 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3727 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3728 }
3729 else
3730 {
3731 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3732 u32GuestCr0 |= X86_CR0_WP;
3733 }
3734
3735 /*
3736 * Guest FPU bits.
3737 *
3738 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
3739 * using CR0.TS.
3740 *
3741 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be
3742 * set on the first CPUs to support VT-x; there is no mention of this with regards to UX in the VM-entry checks.
3743 */
3744 u32GuestCr0 |= X86_CR0_NE;
3745
3746 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
3747 bool const fInterceptMF = !(u32ShadowCr0 & X86_CR0_NE);
3748
3749 /*
3750 * Update exception intercepts.
3751 */
3752 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap;
3753 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3754 {
3755 Assert(PDMVmmDevHeapIsEnabled(pVM));
3756 Assert(pVM->hm.s.vmx.pRealModeTSS);
3757 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3758 }
3759 else
3760 {
3761 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
3762 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3763 if (fInterceptMF)
3764 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
3765 }
3766
3767 /* Additional intercepts for debugging, define these yourself explicitly. */
3768#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3769 uXcptBitmap |= 0
3770 | RT_BIT(X86_XCPT_BP)
3771 | RT_BIT(X86_XCPT_DE)
3772 | RT_BIT(X86_XCPT_NM)
3773 | RT_BIT(X86_XCPT_TS)
3774 | RT_BIT(X86_XCPT_UD)
3775 | RT_BIT(X86_XCPT_NP)
3776 | RT_BIT(X86_XCPT_SS)
3777 | RT_BIT(X86_XCPT_GP)
3778 | RT_BIT(X86_XCPT_PF)
3779 | RT_BIT(X86_XCPT_MF)
3780 ;
3781#elif defined(HMVMX_ALWAYS_TRAP_PF)
3782 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
3783#endif
3784 Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
3785
3786 /*
3787 * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW).
3788 */
3789 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3790 uint32_t fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
3791 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3792 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
3793 else
3794 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3795
3796 u32GuestCr0 |= fSetCr0;
3797 u32GuestCr0 &= fZapCr0;
3798 u32GuestCr0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3799
3800 /*
3801 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3802 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3803 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3804 */
3805 uint32_t u32Cr0Mask = X86_CR0_PE
3806 | X86_CR0_NE
3807 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
3808 | X86_CR0_PG
3809 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3810 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3811 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3812
3813 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM
3814 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
3815 * and @bugref{6944}. */
3816#if 0
3817 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3818 u32Cr0Mask &= ~X86_CR0_PE;
3819#endif
3820 /*
3821 * Finally, update VMCS fields with the CR0 values and the exception bitmap.
3822 */
3823 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCr0);
3824 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32ShadowCr0);
3825 if (u32Cr0Mask != pVCpu->hm.s.vmx.u32Cr0Mask)
3826 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32Cr0Mask);
3827 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
3828 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
3829 if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap)
3830 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3831 AssertRCReturn(rc, rc);
3832
3833 /* Update our caches. */
3834 pVCpu->hm.s.vmx.u32Cr0Mask = u32Cr0Mask;
3835 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
3836 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap;
3837
3838 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
3839
3840 Log4Func(("u32Cr0Mask=%#RX32 u32ShadowCr0=%#RX32 u32GuestCr0=%#RX32 (fSetCr0=%#RX32 fZapCr0=%#RX32\n", u32Cr0Mask,
3841 u32ShadowCr0, u32GuestCr0, fSetCr0, fZapCr0));
3842 }
3843
3844 return VINF_SUCCESS;
3845}
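
/*
 * Illustrative sketch, not part of the VirtualBox source: how the CR0 fixed-bit MSR pair is
 * applied by hmR0VmxExportGuestCR0() above. Bits set in both FIXED0 and FIXED1 must be 1 in
 * the value loaded into the VMCS, bits clear in both must be 0, everything else is free. The
 * same fSet/fZap pattern is used for CR4 below. The helper name is hypothetical.
 */
#include <stdint.h>

static uint32_t ApplyCrFixedBits(uint32_t uGuestCr, uint64_t uFixed0, uint64_t uFixed1)
{
    uint32_t const fSet = (uint32_t)(uFixed0 & uFixed1);    /* must-be-one bits */
    uint32_t const fZap = (uint32_t)(uFixed0 | uFixed1);    /* may-be-one bits */
    return (uGuestCr | fSet) & fZap;
}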
3846
3847
3848/**
3849 * Exports the guest control registers (CR3, CR4) into the guest-state area
3850 * in the VMCS.
3851 *
3852 * @returns VBox strict status code.
3853 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
3854 * without unrestricted guest access and the VMMDev is not presently
3855 * mapped (e.g. EFI32).
3856 *
3857 * @param pVCpu The cross context virtual CPU structure.
3858 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3859 * out-of-sync. Make sure to update the required fields
3860 * before using them.
3861 *
3862 * @remarks No-long-jump zone!!!
3863 */
3864static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
3865{
3866 int rc = VINF_SUCCESS;
3867 PVM pVM = pVCpu->CTX_SUFF(pVM);
3868
3869 /*
3870 * Guest CR2.
3871 * It's always loaded in the assembler code. Nothing to do here.
3872 */
3873
3874 /*
3875 * Guest CR3.
3876 */
3877 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
3878 {
3879 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3880
3881 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3882 if (pVM->hm.s.fNestedPaging)
3883 {
3884 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3885
3886 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3887 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3888 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3889 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3890
3891 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3892 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3893 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
3894
3895 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3896 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
3897 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
3898 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3899 AssertMsg( !((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
3900 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
3901 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3902
3903 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3904 AssertRCReturn(rc, rc);
3905
3906 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3907 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3908 {
3909 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3910 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3911 {
3912 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
3913 AssertRCReturn(rc, rc);
3914 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
3915 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
3916 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
3917 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
3918 AssertRCReturn(rc, rc);
3919 }
3920
3921 /*
3922 * With nested paging the guest keeps its own, unblemished view of CR3: either the
3923 * guest is using paging, or unrestricted guest execution handles the case where
3924 * it isn't using paging.
3925 */
3926 GCPhysGuestCR3 = pMixedCtx->cr3;
3927 }
3928 else
3929 {
3930 /*
3931 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
3932 * thinks it accesses physical memory directly, we use our identity-mapped
3933 * page table to map guest-linear to guest-physical addresses. EPT takes care
3934 * of translating it to host-physical addresses.
3935 */
3936 RTGCPHYS GCPhys;
3937 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3938
3939 /* We obtain it here every time as the guest could have relocated this PCI region. */
3940 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3941 if (RT_SUCCESS(rc))
3942 { /* likely */ }
3943 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
3944 {
3945 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
3946 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
3947 }
3948 else
3949 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
3950
3951 GCPhysGuestCR3 = GCPhys;
3952 }
3953
3954 Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCR3));
3955 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3956 AssertRCReturn(rc, rc);
3957 }
3958 else
3959 {
3960 /* Non-nested paging case, just use the hypervisor's CR3. */
3961 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3962
3963 Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
3964 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3965 AssertRCReturn(rc, rc);
3966 }
3967
3968 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
3969 }
3970
3971 /*
3972 * Guest CR4.
3973 * ASSUMES this is done every time we get in from ring-3! (XCR0)
3974 */
3975 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
3976 {
3977 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3978 Assert(!RT_HI_U32(pMixedCtx->cr4));
3979
3980 uint32_t u32GuestCr4 = pMixedCtx->cr4;
3981 uint32_t const u32ShadowCr4 = pMixedCtx->cr4;
3982
3983 /*
3984 * Setup VT-x's view of the guest CR4.
3985 *
3986 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
3987 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
3988 * redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
3989 *
3990 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3991 */
3992 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3993 {
3994 Assert(pVM->hm.s.vmx.pRealModeTSS);
3995 Assert(PDMVmmDevHeapIsEnabled(pVM));
3996 u32GuestCr4 &= ~X86_CR4_VME;
3997 }
3998
3999 if (pVM->hm.s.fNestedPaging)
4000 {
4001 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
4002 && !pVM->hm.s.vmx.fUnrestrictedGuest)
4003 {
4004 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
4005 u32GuestCr4 |= X86_CR4_PSE;
4006 /* Our identity mapping is a 32-bit page directory. */
4007 u32GuestCr4 &= ~X86_CR4_PAE;
4008 }
4009 /* else use guest CR4.*/
4010 }
4011 else
4012 {
4013 /*
4014 * The shadow paging mode can differ from the guest paging mode, since the shadow follows the host
4015 * paging mode; thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
4016 */
4017 switch (pVCpu->hm.s.enmShadowMode)
4018 {
4019 case PGMMODE_REAL: /* Real-mode. */
4020 case PGMMODE_PROTECTED: /* Protected mode without paging. */
4021 case PGMMODE_32_BIT: /* 32-bit paging. */
4022 {
4023 u32GuestCr4 &= ~X86_CR4_PAE;
4024 break;
4025 }
4026
4027 case PGMMODE_PAE: /* PAE paging. */
4028 case PGMMODE_PAE_NX: /* PAE paging with NX. */
4029 {
4030 u32GuestCr4 |= X86_CR4_PAE;
4031 break;
4032 }
4033
4034 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
4035 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
4036#ifdef VBOX_ENABLE_64_BITS_GUESTS
4037 break;
4038#endif
4039 default:
4040 AssertFailed();
4041 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4042 }
4043 }
4044
4045 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
4046 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4047 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
4048 u32GuestCr4 |= fSetCr4;
4049 u32GuestCr4 &= fZapCr4;
4050
4051 /* Setup the CR4 mask. These are the CR4 flags owned by the host; if the guest attempts to
4052 change them, a VM-exit occurs. */
4053 uint32_t u32Cr4Mask = X86_CR4_VME
4054 | X86_CR4_PAE
4055 | X86_CR4_PGE
4056 | X86_CR4_PSE
4057 | X86_CR4_VMXE;
4058 if (pVM->cpum.ro.HostFeatures.fXSaveRstor)
4059 u32Cr4Mask |= X86_CR4_OSXSAVE;
4060 if (pVM->cpum.ro.GuestFeatures.fPcid)
4061 u32Cr4Mask |= X86_CR4_PCIDE;
4062
4063 /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow
4064 into the VMCS and update our cache. */
4065 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCr4);
4066 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32ShadowCr4);
4067 if (pVCpu->hm.s.vmx.u32Cr4Mask != u32Cr4Mask)
4068 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32Cr4Mask);
4069 AssertRCReturn(rc, rc);
4070 pVCpu->hm.s.vmx.u32Cr4Mask = u32Cr4Mask;
4071
4072 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
4073 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
4074
4075 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
4076
4077 Log4Func(("u32GuestCr4=%#RX32 u32ShadowCr4=%#RX32 (fSetCr4=%#RX32 fZapCr4=%#RX32)\n", u32GuestCr4, u32ShadowCr4, fSetCr4,
4078 fZapCr4));
4079 }
4080 return rc;
4081}
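
/*
 * Illustrative sketch, not part of the VirtualBox source: composing an EPT pointer the way
 * hmR0VmxExportGuestCR3AndCR4() above does. Bits 2:0 hold the memory type (6 = write-back),
 * bits 5:3 hold the page-walk length minus one (3 for a 4-level walk) and the 4K-aligned
 * PML4 table address occupies the upper bits. The helper name is hypothetical.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t MakeEptpSketch(uint64_t uHostPhysPml4)
{
    assert(!(uHostPhysPml4 & UINT64_C(0xfff)));     /* table must be 4K aligned */
    uint64_t const uMemTypeWb     = 6;              /* write-back */
    uint64_t const uWalkLenMinus1 = 3;              /* 4-level page walk */
    return uHostPhysPml4 | uMemTypeWb | (uWalkLenMinus1 << 3);
}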
4082
4083
4084/**
4085 * Exports the guest debug registers into the guest-state area in the VMCS.
4086 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
4087 *
4088 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
4089 *
4090 * @returns VBox status code.
4091 * @param pVCpu The cross context virtual CPU structure.
4092 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4093 * out-of-sync. Make sure to update the required fields
4094 * before using them.
4095 *
4096 * @remarks No-long-jump zone!!!
4097 */
4098static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4099{
4100 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4101
4102#ifdef VBOX_STRICT
4103 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
4104 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
4105 {
4106 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
4107 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
4108 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
4109 }
4110#endif
4111
4112 bool fSteppingDB = false;
4113 bool fInterceptMovDRx = false;
4114 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
4115 if (pVCpu->hm.s.fSingleInstruction)
4116 {
4117 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
4118 PVM pVM = pVCpu->CTX_SUFF(pVM);
4119 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
4120 {
4121 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
4122 Assert(fSteppingDB == false);
4123 }
4124 else
4125 {
4126 pMixedCtx->eflags.u32 |= X86_EFL_TF;
4127 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
4128 pVCpu->hm.s.fClearTrapFlag = true;
4129 fSteppingDB = true;
4130 }
4131 }
4132
4133 uint32_t u32GuestDr7;
4134 if ( fSteppingDB
4135 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
4136 {
4137 /*
4138 * Use the combined guest and host DRx values found in the hypervisor register set
4139 * because the debugger has breakpoints active or someone is single stepping on the
4140 * host side without a monitor trap flag.
4141 *
4142 * Note! DBGF expects a clean DR6 state before executing guest code.
4143 */
4144#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4145 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4146 && !CPUMIsHyperDebugStateActivePending(pVCpu))
4147 {
4148 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4149 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
4150 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
4151 }
4152 else
4153#endif
4154 if (!CPUMIsHyperDebugStateActive(pVCpu))
4155 {
4156 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
4157 Assert(CPUMIsHyperDebugStateActive(pVCpu));
4158 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
4159 }
4160
4161 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
4162 u32GuestDr7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
4163 pVCpu->hm.s.fUsingHyperDR7 = true;
4164 fInterceptMovDRx = true;
4165 }
4166 else
4167 {
4168 /*
4169 * If the guest has enabled debug registers, we need to load them prior to
4170 * executing guest code so they'll trigger at the right time.
4171 */
4172 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
4173 {
4174#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4175 if ( CPUMIsGuestInLongModeEx(pMixedCtx)
4176 && !CPUMIsGuestDebugStateActivePending(pVCpu))
4177 {
4178 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4179 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
4180 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
4181 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4182 }
4183 else
4184#endif
4185 if (!CPUMIsGuestDebugStateActive(pVCpu))
4186 {
4187 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
4188 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4189 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
4190 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
4191 }
4192 Assert(!fInterceptMovDRx);
4193 }
4194 /*
4195 * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
4196 * must intercept #DB in order to maintain a correct DR6 guest value, and
4197 * because we need to intercept it to prevent nested #DBs from hanging the
4198 * CPU, we end up always having to intercept it. See hmR0VmxInitXcptBitmap.
4199 */
4200#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
4201 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
4202 && !CPUMIsGuestDebugStateActive(pVCpu))
4203#else
4204 else if (!CPUMIsGuestDebugStateActive(pVCpu))
4205#endif
4206 {
4207 fInterceptMovDRx = true;
4208 }
4209
4210 /* Update DR7 with the actual guest value. */
4211 u32GuestDr7 = pMixedCtx->dr[7];
4212 pVCpu->hm.s.fUsingHyperDR7 = false;
4213 }
4214
4215 if (fInterceptMovDRx)
4216 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4217 else
4218 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
4219
4220 /*
4221 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
4222 * monitor-trap flag and update our cache.
4223 */
4224 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
4225 {
4226 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
4227 AssertRCReturn(rc2, rc2);
4228 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
4229 }
4230
4231 /*
4232 * Update guest DR7.
4233 */
4234 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
4235 AssertRCReturn(rc, rc);
4236
4237 return VINF_SUCCESS;
4238}
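
/*
 * Illustrative sketch, not part of the VirtualBox source: the DR7 test behind the decisions in
 * hmR0VmxExportSharedDebugState() above. The local/global enable bits of the four hardware
 * breakpoints occupy DR7 bits 7:0; if any is set, the debug registers must be loaded before
 * running guest code. The macro and helper names are hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

#define DR7_LG_ENABLE_MASK  UINT64_C(0x00ff)    /* L0..L3 and G0..G3 */

static bool AreHwBreakpointsArmed(uint64_t uDr7)
{
    return (uDr7 & DR7_LG_ENABLE_MASK) != 0;
}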
4239
4240
4241#ifdef VBOX_STRICT
4242/**
4243 * Strict function to validate segment registers.
4244 *
4245 * @param pVCpu The cross context virtual CPU structure.
4246 * @param pCtx Pointer to the guest-CPU context.
4247 *
4248 * @remarks Will import guest CR0 on strict builds during validation of
4249 * segments.
4250 */
4251static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pCtx)
4252{
4253 /*
4254 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
4255 *
4256 * The reason we check for attribute value 0 in this function and not just the unusable bit is
4257 * because hmR0VmxExportGuestSegmentReg() only updates the VMCS' copy of the value with the unusable bit
4258 * and doesn't change the guest-context value.
4259 */
4260 PVM pVM = pVCpu->CTX_SUFF(pVM);
4261 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
4262 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
4263 && ( !CPUMIsGuestInRealModeEx(pCtx)
4264 && !CPUMIsGuestInV86ModeEx(pCtx)))
4265 {
4266 /* Protected mode checks */
4267 /* CS */
4268 Assert(pCtx->cs.Attr.n.u1Present);
4269 Assert(!(pCtx->cs.Attr.u & 0xf00));
4270 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
4271 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
4272 || !(pCtx->cs.Attr.n.u1Granularity));
4273 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
4274 || (pCtx->cs.Attr.n.u1Granularity));
4275 /* CS cannot be loaded with NULL in protected mode. */
4276 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
4277 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
4278 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
4279 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
4280 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
4281 else
4282 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
4283 /* SS */
4284 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4285 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
4286 if ( !(pCtx->cr0 & X86_CR0_PE)
4287 || pCtx->cs.Attr.n.u4Type == 3)
4288 {
4289 Assert(!pCtx->ss.Attr.n.u2Dpl);
4290 }
4291 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
4292 {
4293 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
4294 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
4295 Assert(pCtx->ss.Attr.n.u1Present);
4296 Assert(!(pCtx->ss.Attr.u & 0xf00));
4297 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
4298 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
4299 || !(pCtx->ss.Attr.n.u1Granularity));
4300 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
4301 || (pCtx->ss.Attr.n.u1Granularity));
4302 }
4303 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
4304 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
4305 {
4306 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4307 Assert(pCtx->ds.Attr.n.u1Present);
4308 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
4309 Assert(!(pCtx->ds.Attr.u & 0xf00));
4310 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
4311 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
4312 || !(pCtx->ds.Attr.n.u1Granularity));
4313 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
4314 || (pCtx->ds.Attr.n.u1Granularity));
4315 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4316 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
4317 }
4318 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
4319 {
4320 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4321 Assert(pCtx->es.Attr.n.u1Present);
4322 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
4323 Assert(!(pCtx->es.Attr.u & 0xf00));
4324 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
4325 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
4326 || !(pCtx->es.Attr.n.u1Granularity));
4327 Assert( !(pCtx->es.u32Limit & 0xfff00000)
4328 || (pCtx->es.Attr.n.u1Granularity));
4329 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4330 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
4331 }
4332 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
4333 {
4334 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4335 Assert(pCtx->fs.Attr.n.u1Present);
4336 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
4337 Assert(!(pCtx->fs.Attr.u & 0xf00));
4338 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
4339 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
4340 || !(pCtx->fs.Attr.n.u1Granularity));
4341 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
4342 || (pCtx->fs.Attr.n.u1Granularity));
4343 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4344 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4345 }
4346 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
4347 {
4348 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
4349 Assert(pCtx->gs.Attr.n.u1Present);
4350 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
4351 Assert(!(pCtx->gs.Attr.u & 0xf00));
4352 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
4353 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
4354 || !(pCtx->gs.Attr.n.u1Granularity));
4355 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
4356 || (pCtx->gs.Attr.n.u1Granularity));
4357 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
4358 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
4359 }
4360 /* 64-bit capable CPUs. */
4361# if HC_ARCH_BITS == 64
4362 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4363 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
4364 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
4365 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
4366# endif
4367 }
4368 else if ( CPUMIsGuestInV86ModeEx(pCtx)
4369 || ( CPUMIsGuestInRealModeEx(pCtx)
4370 && !pVM->hm.s.vmx.fUnrestrictedGuest))
4371 {
4372 /* Real and v86 mode checks. */
4373 /* hmR0VmxExportGuestSegmentReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
4374 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
4375 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4376 {
4377 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
4378 }
4379 else
4380 {
4381 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
4382 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
4383 }
4384
4385 /* CS */
4386 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
4387 Assert(pCtx->cs.u32Limit == 0xffff);
4388 Assert(u32CSAttr == 0xf3);
4389 /* SS */
4390 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
4391 Assert(pCtx->ss.u32Limit == 0xffff);
4392 Assert(u32SSAttr == 0xf3);
4393 /* DS */
4394 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
4395 Assert(pCtx->ds.u32Limit == 0xffff);
4396 Assert(u32DSAttr == 0xf3);
4397 /* ES */
4398 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
4399 Assert(pCtx->es.u32Limit == 0xffff);
4400 Assert(u32ESAttr == 0xf3);
4401 /* FS */
4402 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
4403 Assert(pCtx->fs.u32Limit == 0xffff);
4404 Assert(u32FSAttr == 0xf3);
4405 /* GS */
4406 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
4407 Assert(pCtx->gs.u32Limit == 0xffff);
4408 Assert(u32GSAttr == 0xf3);
4409 /* 64-bit capable CPUs. */
4410# if HC_ARCH_BITS == 64
4411 Assert(!RT_HI_U32(pCtx->cs.u64Base));
4412 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
4413 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
4414 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
4415# endif
4416 }
4417}
4418#endif /* VBOX_STRICT */
4419
4420
4421/**
4422 * Exports a guest segment register into the guest-state area in the VMCS.
4423 *
4424 * @returns VBox status code.
4425 * @param pVCpu The cross context virtual CPU structure.
4426 * @param idxSel Index of the selector in the VMCS.
4427 * @param idxLimit Index of the segment limit in the VMCS.
4428 * @param idxBase Index of the segment base in the VMCS.
4429 * @param idxAccess Index of the access rights of the segment in the VMCS.
4430 * @param pSelReg Pointer to the segment selector.
4431 *
4432 * @remarks No-long-jump zone!!!
4433 */
4434static int hmR0VmxExportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
4435 PCCPUMSELREG pSelReg)
4436{
4437 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
4438 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
4439 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
4440 AssertRCReturn(rc, rc);
4441
4442 uint32_t u32Access = pSelReg->Attr.u;
4443 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4444 {
4445 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
4446 u32Access = 0xf3;
4447 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4448 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4449 }
4450 else
4451 {
4452 /*
4453         * The way to differentiate between whether this is really a null selector or was just
4454         * a selector loaded with 0 in real-mode is by using the segment attributes. A selector
4455         * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
4456         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
4457         * NULL selectors loaded in protected-mode have their attributes set to 0.
4458 */
4459 if (!u32Access)
4460 u32Access = X86DESCATTR_UNUSABLE;
4461 }
4462
4463 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
4464 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
4465              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
4466
4467 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
4468 AssertRCReturn(rc, rc);
4469 return rc;
4470}
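
/*
 * Illustrative sketch only (not part of the original source): what a direct call to
 * hmR0VmxExportGuestSegmentReg() looks like for ES. The VMCS field encodings named here
 * follow the VMX_VMCS*_GUEST_ES_* pattern seen for TR/LDTR below and are assumptions;
 * the HMVMX_EXPORT_SREG() convenience macro used by hmR0VmxExportGuestSegmentRegs() is
 * expected to expand to an equivalent call.
 *
 *     int rc = hmR0VmxExportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_ES_SEL, VMX_VMCS32_GUEST_ES_LIMIT,
 *                                           VMX_VMCS_GUEST_ES_BASE, VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
 *                                           &pMixedCtx->es);
 *     AssertRCReturn(rc, rc);
 */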
4471
4472
4473/**
4474 * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
4475 * into the guest-state area in the VMCS.
4476 *
4477 * @returns VBox status code.
4478 * @param pVCpu The cross context virtual CPU structure.
4479 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4480 * out-of-sync. Make sure to update the required fields
4481 * before using them.
4482 *
4483 * @remarks Will import guest CR0 on strict builds during validation of
4484 * segments.
4485 * @remarks No-long-jump zone!!!
4486 */
4487static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
4488{
4489 int rc = VERR_INTERNAL_ERROR_5;
4490 PVM pVM = pVCpu->CTX_SUFF(pVM);
4491
4492 /*
4493 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
4494 */
4495 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
4496 {
4497#ifdef VBOX_WITH_REM
4498 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
4499 {
4500 Assert(pVM->hm.s.vmx.pRealModeTSS);
4501 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
4502 if ( pVCpu->hm.s.vmx.fWasInRealMode
4503 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
4504 {
4505 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
4506 in real-mode (e.g. OpenBSD 4.0) */
4507 REMFlushTBs(pVM);
4508 Log4Func(("Switch to protected mode detected!\n"));
4509 pVCpu->hm.s.vmx.fWasInRealMode = false;
4510 }
4511 }
4512#endif
4513 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
4514 {
4515 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
4516 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4517 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
4518 rc = HMVMX_EXPORT_SREG(CS, &pMixedCtx->cs);
4519 AssertRCReturn(rc, rc);
4520 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
4521 }
4522
4523 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
4524 {
4525 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
4526 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4527 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
4528 rc = HMVMX_EXPORT_SREG(SS, &pMixedCtx->ss);
4529 AssertRCReturn(rc, rc);
4530 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
4531 }
4532
4533 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
4534 {
4535 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
4536 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4537 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
4538 rc = HMVMX_EXPORT_SREG(DS, &pMixedCtx->ds);
4539 AssertRCReturn(rc, rc);
4540 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
4541 }
4542
4543 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
4544 {
4545 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
4546 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4547 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
4548 rc = HMVMX_EXPORT_SREG(ES, &pMixedCtx->es);
4549 AssertRCReturn(rc, rc);
4550 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
4551 }
4552
4553 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
4554 {
4555 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
4556 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4557 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
4558 rc = HMVMX_EXPORT_SREG(FS, &pMixedCtx->fs);
4559 AssertRCReturn(rc, rc);
4560 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
4561 }
4562
4563 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
4564 {
4565 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
4566 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4567 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
4568 rc = HMVMX_EXPORT_SREG(GS, &pMixedCtx->gs);
4569 AssertRCReturn(rc, rc);
4570 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
4571 }
4572
4573#ifdef VBOX_STRICT
4574 hmR0VmxValidateSegmentRegs(pVCpu, pMixedCtx);
4575#endif
4576
4577 /* Update the exit history entry with the correct CS.BASE + RIP. */
4578 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
4579 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true);
4580
4581 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
4582 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
4583 }
4584
4585 /*
4586 * Guest TR.
4587 */
4588 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
4589 {
4590 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
4591
4592 /*
4593 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
4594 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
4595 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
4596 */
4597 uint16_t u16Sel = 0;
4598 uint32_t u32Limit = 0;
4599 uint64_t u64Base = 0;
4600 uint32_t u32AccessRights = 0;
4601
4602 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
4603 {
4604 u16Sel = pMixedCtx->tr.Sel;
4605 u32Limit = pMixedCtx->tr.u32Limit;
4606 u64Base = pMixedCtx->tr.u64Base;
4607 u32AccessRights = pMixedCtx->tr.Attr.u;
4608 }
4609 else
4610 {
4611 Assert(pVM->hm.s.vmx.pRealModeTSS);
4612 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
4613
4614 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
4615 RTGCPHYS GCPhys;
4616 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
4617 AssertRCReturn(rc, rc);
4618
4619 X86DESCATTR DescAttr;
4620 DescAttr.u = 0;
4621 DescAttr.n.u1Present = 1;
4622 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
4623
4624 u16Sel = 0;
4625 u32Limit = HM_VTX_TSS_SIZE;
4626 u64Base = GCPhys; /* in real-mode phys = virt. */
4627 u32AccessRights = DescAttr.u;
4628 }
4629
4630 /* Validate. */
4631 Assert(!(u16Sel & RT_BIT(2)));
4632 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
4633 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
4634 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
4635 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
4636 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
4637 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
4638 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
4639 Assert( (u32Limit & 0xfff) == 0xfff
4640 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
4641 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
4642 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
4643
4644 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_TR_SEL, u16Sel);
4645 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
4646 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
4647 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
4648 AssertRCReturn(rc, rc);
4649
4650 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
4651 Log4Func(("TR base=%#RX64\n", pMixedCtx->tr.u64Base));
4652 }
4653
4654 /*
4655 * Guest GDTR.
4656 */
4657 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
4658 {
4659 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
4660
4661 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt);
4662 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt);
4663 AssertRCReturn(rc, rc);
4664
4665 /* Validate. */
4666 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4667
4668 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
4669 Log4Func(("GDTR base=%#RX64\n", pMixedCtx->gdtr.pGdt));
4670 }
4671
4672 /*
4673 * Guest LDTR.
4674 */
4675 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
4676 {
4677 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
4678
4679 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
4680 uint32_t u32Access = 0;
4681 if (!pMixedCtx->ldtr.Attr.u)
4682 u32Access = X86DESCATTR_UNUSABLE;
4683 else
4684 u32Access = pMixedCtx->ldtr.Attr.u;
4685
4686 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, pMixedCtx->ldtr.Sel);
4687 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit);
4688 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
4689 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base);
4690 AssertRCReturn(rc, rc);
4691
4692 /* Validate. */
4693 if (!(u32Access & X86DESCATTR_UNUSABLE))
4694 {
4695 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
4696 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
4697 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
4698 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
4699 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
4700 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
4701 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
4702 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
4703 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
4704 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
4705 }
4706
4707 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
4708 Log4Func(("LDTR base=%#RX64\n", pMixedCtx->ldtr.u64Base));
4709 }
4710
4711 /*
4712 * Guest IDTR.
4713 */
4714 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
4715 {
4716 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
4717
4718 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt);
4719 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt);
4720 AssertRCReturn(rc, rc);
4721
4722 /* Validate. */
4723 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
4724
4725 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
4726 Log4Func(("IDTR base=%#RX64\n", pMixedCtx->idtr.pIdt));
4727 }
4728
4729 return VINF_SUCCESS;
4730}
4731
4732
4733/**
4734 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
4735 * areas.
4736 *
4737 * These MSRs will automatically be loaded to the host CPU on every successful
4738 * VM-entry and stored from the host CPU on every successful VM-exit. This also
4739 * creates/updates MSR slots for the host MSRs. The actual host MSR values are
4740 * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs().
4741 *
4742 * Also exports the guest sysenter MSRs into the guest-state area in the VMCS.
4743 *
4744 * @returns VBox status code.
4745 * @param pVCpu The cross context virtual CPU structure.
4746 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4747 * out-of-sync. Make sure to update the required fields
4748 * before using them.
4749 *
4750 * @remarks No-long-jump zone!!!
4751 */
4752static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
4753{
4754 AssertPtr(pVCpu);
4755 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
4756
4757 /*
4758     * MSRs for which we use the auto-load/store MSR area in the VMCS.
4759 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
4760 */
4761 PVM pVM = pVCpu->CTX_SUFF(pVM);
4762 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
4763 {
4764 if (pVM->hm.s.fAllow64BitGuests)
4765 {
4766#if HC_ARCH_BITS == 32
4767 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
4768
4769 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL);
4770 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL);
4771 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK, pMixedCtx->msrSFMASK, false, NULL);
4772 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false, NULL);
4773 AssertRCReturn(rc, rc);
4774# ifdef LOG_ENABLED
4775 PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
4776 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
4777 Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
4778# endif
4779#endif
4780 }
4781 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
4782 }
4783
4784 /*
4785 * Guest Sysenter MSRs.
4786 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
4787 * VM-exits on WRMSRs for these MSRs.
4788 */
4789 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
4790 {
4791 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4792
4793 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
4794 {
4795 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs);
4796 AssertRCReturn(rc, rc);
4797 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
4798 }
4799
4800 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
4801 {
4802 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip);
4803 AssertRCReturn(rc, rc);
4804 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
4805 }
4806
4807 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
4808 {
4809 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp);
4810 AssertRCReturn(rc, rc);
4811 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
4812 }
4813 }
4814
4815 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
4816 {
4817 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
4818
4819 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
4820 {
4821 /*
4822 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
4823 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
4824 */
4825 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4826 {
4827 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
4828                AssertRCReturn(rc, rc);
4829 Log4Func(("EFER=%#RX64\n", pMixedCtx->msrEFER));
4830 }
4831 else
4832 {
4833 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */,
4834 NULL /* pfAddedAndUpdated */);
4835 AssertRCReturn(rc, rc);
4836
4837 /* We need to intercept reads too, see @bugref{7386#c16}. */
4838 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
4839 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
4840 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
4841 pVCpu->hm.s.vmx.cMsrs));
4842 }
4843 }
4844 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
4845 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
4846 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
4847 }
4848
4849 return VINF_SUCCESS;
4850}
4851
4852
4853#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
4854/**
4855 * Checks if the guest state allows safe use of the 32-bit switcher again.
4856 *
4857 * Segment bases and protected mode structures must be 32-bit addressable
4858 * because the 32-bit switcher will ignore high dword when writing these VMCS
4859 * fields. See @bugref{8432} for details.
4860 *
4861 * @returns true if safe, false if must continue to use the 64-bit switcher.
4862 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4863 * out-of-sync. Make sure to update the required fields
4864 * before using them.
4865 *
4866 * @remarks No-long-jump zone!!!
4867 */
4868static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pMixedCtx)
4869{
4870 if (pMixedCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false;
4871 if (pMixedCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false;
4872 if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false;
4873 if (pMixedCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false;
4874 if (pMixedCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false;
4875 if (pMixedCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4876 if (pMixedCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false;
4877 if (pMixedCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false;
4878 if (pMixedCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4879 if (pMixedCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false;
4880
4881 /* All good, bases are 32-bit. */
4882 return true;
4883}
4884#endif
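
/*
 * Worked example (illustrative, not from the original source): a guest that left a segment
 * base above 4 GiB, e.g. fs.u64Base = UINT64_C(0x0000000100000000), makes
 * hmR0VmxIs32BitSwitcherSafe() return false, so the faster 32-bit switcher stays disabled
 * and the 64-on-32 switcher keeps being used until all bases and descriptor tables are
 * 32-bit addressable again.
 */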
4885
4886
4887/**
4888 * Selects the appropriate function to run guest code.
4889 *
4890 * @returns VBox status code.
4891 * @param pVCpu The cross context virtual CPU structure.
4892 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4893 * out-of-sync. Make sure to update the required fields
4894 * before using them.
4895 *
4896 * @remarks No-long-jump zone!!!
4897 */
4898static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
4899{
4900 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4901 {
4902#ifndef VBOX_ENABLE_64_BITS_GUESTS
4903 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4904#endif
4905 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4906#if HC_ARCH_BITS == 32
4907 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4908 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
4909 {
4910#ifdef VBOX_STRICT
4911 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4912 {
4913            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4914 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
4915 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4916 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS
4917 | HM_CHANGED_VMX_ENTRY_CTLS
4918 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
4919 }
4920#endif
4921 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4922
4923 /* Mark that we've switched to 64-bit handler, we can't safely switch back to 32-bit for
4924 the rest of the VM run (until VM reset). See @bugref{8432#c7}. */
4925 pVCpu->hm.s.vmx.fSwitchedTo64on32 = true;
4926 Log4Func(("Selected 64-bit switcher\n"));
4927 }
4928#else
4929 /* 64-bit host. */
4930 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4931#endif
4932 }
4933 else
4934 {
4935 /* Guest is not in long mode, use the 32-bit handler. */
4936#if HC_ARCH_BITS == 32
4937 if ( pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32
4938 && !pVCpu->hm.s.vmx.fSwitchedTo64on32 /* If set, guest mode change does not imply switcher change. */
4939 && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
4940 {
4941# ifdef VBOX_STRICT
4942             /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
4943 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
4944 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4945 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS
4946 | HM_CHANGED_VMX_ENTRY_CTLS
4947 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged));
4948# endif
4949 }
4950# ifdef VBOX_ENABLE_64_BITS_GUESTS
4951 /*
4952 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel
4953 * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit
4954 * switcher flag because now we know the guest is in a sane state where it's safe
4955 * to use the 32-bit switcher. Otherwise check the guest state if it's safe to use
4956 * the much faster 32-bit switcher again.
4957 */
4958 if (!pVCpu->hm.s.vmx.fSwitchedTo64on32)
4959 {
4960 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
4961 Log4Func(("Selected 32-bit switcher\n"));
4962 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4963 }
4964 else
4965 {
4966 Assert(pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64);
4967 if ( pVCpu->hm.s.vmx.RealMode.fRealOnV86Active
4968 || hmR0VmxIs32BitSwitcherSafe(pMixedCtx))
4969 {
4970 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
4971 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4972 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR
4973 | HM_CHANGED_VMX_ENTRY_CTLS
4974 | HM_CHANGED_VMX_EXIT_CTLS
4975 | HM_CHANGED_HOST_CONTEXT);
4976 Log4Func(("Selected 32-bit switcher (safe)\n"));
4977 }
4978 }
4979# else
4980 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4981# endif
4982#else
4983 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4984#endif
4985 }
4986 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4987 return VINF_SUCCESS;
4988}
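
/*
 * Summary of the selection above (illustrative): a guest in long mode gets VMXR0StartVM64 on
 * 64-bit hosts or VMXR0SwitcherStartVM64 on 32-bit hosts; a guest outside long mode normally
 * gets VMXR0StartVM32, except that a 32-bit host which already switched to the 64-on-32
 * switcher keeps it until hmR0VmxIs32BitSwitcherSafe() (or real-on-v86 mode being active)
 * says the 32-bit switcher is safe again.
 */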
4989
4990
4991/**
4992 * Wrapper for running the guest code in VT-x.
4993 *
4994 * @returns VBox status code, no informational status codes.
4995 * @param pVCpu The cross context virtual CPU structure.
4996 * @param pCtx Pointer to the guest-CPU context.
4997 *
4998 * @remarks No-long-jump zone!!!
4999 */
5000DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx)
5001{
5002 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
5003 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
5004
5005 /*
5006 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
5007 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
5008 * callee-saved and thus the need for this XMM wrapper.
5009 *
5010 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
5011 */
5012 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
5013 /** @todo Add stats for resume vs launch. */
5014 PVM pVM = pVCpu->CTX_SUFF(pVM);
5015#ifdef VBOX_WITH_KERNEL_USING_XMM
5016 int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
5017#else
5018 int rc = pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
5019#endif
5020 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
5021 return rc;
5022}
5023
5024
5025/**
5026 * Reports world-switch error and dumps some useful debug info.
5027 *
5028 * @param pVCpu The cross context virtual CPU structure.
5029 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
5030 * @param pCtx Pointer to the guest-CPU context.
5031 * @param pVmxTransient Pointer to the VMX transient structure (only
5032 * exitReason updated).
5033 */
5034static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
5035{
5036 Assert(pVCpu);
5037 Assert(pCtx);
5038 Assert(pVmxTransient);
5039 HMVMX_ASSERT_PREEMPT_SAFE();
5040
5041 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
5042 switch (rcVMRun)
5043 {
5044 case VERR_VMX_INVALID_VMXON_PTR:
5045 AssertFailed();
5046 break;
5047 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
5048 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
5049 {
5050 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
5051 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
5052 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
5053 AssertRC(rc);
5054
5055 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
5056 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
5057 Cannot do it here as we may have been long preempted. */
5058
5059#ifdef VBOX_STRICT
5060 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
5061 pVmxTransient->uExitReason));
5062 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
5063 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
5064 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
5065 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
5066 else
5067 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
5068 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
5069 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
5070
5071 /* VMX control bits. */
5072 uint32_t u32Val;
5073 uint64_t u64Val;
5074 RTHCUINTREG uHCReg;
5075 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
5076 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
5077 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
5078 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
5079 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
5080 {
5081 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
5082 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
5083 }
5084 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
5085 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
5086 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
5087 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
5088 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
5089 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
5090 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
5091 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
5092 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
5093 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
5094 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
5095 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
5096 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
5097 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
5098 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
5099 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
5100 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5101 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
5102 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
5103 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
5104 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
5105 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
5106 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
5107 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
5108 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
5109 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
5110 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
5111 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
5112 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
5113            Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
5114 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
5115 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
5116 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
5117 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
5118 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
5119 {
5120 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
5121 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
5122 }
5123
5124 /* Guest bits. */
5125 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
5126 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
5127 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
5128 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
5129 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
5130 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
5131 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
5132 {
5133 rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val); AssertRC(rc);
5134 Log4(("VMX_VMCS16_VPID %u\n", u32Val));
5135 }
5136
5137 /* Host bits. */
5138 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
5139 Log4(("Host CR0 %#RHr\n", uHCReg));
5140 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
5141 Log4(("Host CR3 %#RHr\n", uHCReg));
5142 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
5143 Log4(("Host CR4 %#RHr\n", uHCReg));
5144
5145 RTGDTR HostGdtr;
5146 PCX86DESCHC pDesc;
5147 ASMGetGDTR(&HostGdtr);
5148 rc = VMXReadVmcs32(VMX_VMCS16_HOST_CS_SEL, &u32Val); AssertRC(rc);
5149 Log4(("Host CS %#08x\n", u32Val));
5150 if (u32Val < HostGdtr.cbGdt)
5151 {
5152 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5153 hmR0DumpDescriptor(pDesc, u32Val, "CS: ");
5154 }
5155
5156 rc = VMXReadVmcs32(VMX_VMCS16_HOST_DS_SEL, &u32Val); AssertRC(rc);
5157 Log4(("Host DS %#08x\n", u32Val));
5158 if (u32Val < HostGdtr.cbGdt)
5159 {
5160 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5161 hmR0DumpDescriptor(pDesc, u32Val, "DS: ");
5162 }
5163
5164 rc = VMXReadVmcs32(VMX_VMCS16_HOST_ES_SEL, &u32Val); AssertRC(rc);
5165 Log4(("Host ES %#08x\n", u32Val));
5166 if (u32Val < HostGdtr.cbGdt)
5167 {
5168 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5169 hmR0DumpDescriptor(pDesc, u32Val, "ES: ");
5170 }
5171
5172 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FS_SEL, &u32Val); AssertRC(rc);
5173 Log4(("Host FS %#08x\n", u32Val));
5174 if (u32Val < HostGdtr.cbGdt)
5175 {
5176 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5177 hmR0DumpDescriptor(pDesc, u32Val, "FS: ");
5178 }
5179
5180 rc = VMXReadVmcs32(VMX_VMCS16_HOST_GS_SEL, &u32Val); AssertRC(rc);
5181 Log4(("Host GS %#08x\n", u32Val));
5182 if (u32Val < HostGdtr.cbGdt)
5183 {
5184 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5185 hmR0DumpDescriptor(pDesc, u32Val, "GS: ");
5186 }
5187
5188 rc = VMXReadVmcs32(VMX_VMCS16_HOST_SS_SEL, &u32Val); AssertRC(rc);
5189 Log4(("Host SS %#08x\n", u32Val));
5190 if (u32Val < HostGdtr.cbGdt)
5191 {
5192 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5193 hmR0DumpDescriptor(pDesc, u32Val, "SS: ");
5194 }
5195
5196 rc = VMXReadVmcs32(VMX_VMCS16_HOST_TR_SEL, &u32Val); AssertRC(rc);
5197 Log4(("Host TR %#08x\n", u32Val));
5198 if (u32Val < HostGdtr.cbGdt)
5199 {
5200 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
5201 hmR0DumpDescriptor(pDesc, u32Val, "TR: ");
5202 }
5203
5204 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
5205 Log4(("Host TR Base %#RHv\n", uHCReg));
5206 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
5207 Log4(("Host GDTR Base %#RHv\n", uHCReg));
5208 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
5209 Log4(("Host IDTR Base %#RHv\n", uHCReg));
5210 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
5211 Log4(("Host SYSENTER CS %#08x\n", u32Val));
5212 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
5213 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
5214 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
5215 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
5216 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
5217 Log4(("Host RSP %#RHv\n", uHCReg));
5218 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
5219 Log4(("Host RIP %#RHv\n", uHCReg));
5220# if HC_ARCH_BITS == 64
5221 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
5222 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5223 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5224 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
5225 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5226 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5227# endif
5228#endif /* VBOX_STRICT */
5229 break;
5230 }
5231
5232 default:
5233 /* Impossible */
5234 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
5235 break;
5236 }
5237 NOREF(pCtx);
5238}
5239
5240
5241#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
5242#ifndef VMX_USE_CACHED_VMCS_ACCESSES
5243# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
5244#endif
5245#ifdef VBOX_STRICT
5246static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5247{
5248 switch (idxField)
5249 {
5250 case VMX_VMCS_GUEST_RIP:
5251 case VMX_VMCS_GUEST_RSP:
5252 case VMX_VMCS_GUEST_SYSENTER_EIP:
5253 case VMX_VMCS_GUEST_SYSENTER_ESP:
5254 case VMX_VMCS_GUEST_GDTR_BASE:
5255 case VMX_VMCS_GUEST_IDTR_BASE:
5256 case VMX_VMCS_GUEST_CS_BASE:
5257 case VMX_VMCS_GUEST_DS_BASE:
5258 case VMX_VMCS_GUEST_ES_BASE:
5259 case VMX_VMCS_GUEST_FS_BASE:
5260 case VMX_VMCS_GUEST_GS_BASE:
5261 case VMX_VMCS_GUEST_SS_BASE:
5262 case VMX_VMCS_GUEST_LDTR_BASE:
5263 case VMX_VMCS_GUEST_TR_BASE:
5264 case VMX_VMCS_GUEST_CR3:
5265 return true;
5266 }
5267 return false;
5268}
5269
5270static bool hmR0VmxIsValidReadField(uint32_t idxField)
5271{
5272 switch (idxField)
5273 {
5274 /* Read-only fields. */
5275 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5276 return true;
5277 }
5278 /* Remaining readable fields should also be writable. */
5279 return hmR0VmxIsValidWriteField(idxField);
5280}
5281#endif /* VBOX_STRICT */
5282
5283
5284/**
5285 * Executes the specified handler in 64-bit mode.
5286 *
5287 * @returns VBox status code (no informational status codes).
5288 * @param pVCpu The cross context virtual CPU structure.
5289 * @param enmOp The operation to perform.
5290 * @param cParams Number of parameters.
5291 * @param paParam Array of 32-bit parameters.
5292 */
5293VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
5294{
5295 PVM pVM = pVCpu->CTX_SUFF(pVM);
5296 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5297 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
5298 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5299 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5300
5301#ifdef VBOX_STRICT
5302 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5303 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5304
5305    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5306 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5307#endif
5308
5309 /* Disable interrupts. */
5310 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
5311
5312#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5313 RTCPUID idHostCpu = RTMpCpuId();
5314 CPUMR0SetLApic(pVCpu, idHostCpu);
5315#endif
5316
5317 PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
5318 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5319
5320    /* Clear the VMCS. This marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
5321 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5322 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
5323
5324 /* Leave VMX Root Mode. */
5325 VMXDisable();
5326
5327 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5328
5329 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5330 CPUMSetHyperEIP(pVCpu, enmOp);
5331 for (int i = (int)cParams - 1; i >= 0; i--)
5332 CPUMPushHyper(pVCpu, paParam[i]);
5333
5334 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5335
5336 /* Call the switcher. */
5337 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5338 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5339
5340 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
5341 /* Make sure the VMX instructions don't cause #UD faults. */
5342 SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
5343
5344 /* Re-enter VMX Root Mode */
5345 int rc2 = VMXEnable(HCPhysCpuPage);
5346 if (RT_FAILURE(rc2))
5347 {
5348 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
5349 ASMSetFlags(fOldEFlags);
5350 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
5351 return rc2;
5352 }
5353
5354 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
5355 AssertRC(rc2);
5356 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
5357 Assert(!(ASMGetFlags() & X86_EFL_IF));
5358 ASMSetFlags(fOldEFlags);
5359 return rc;
5360}
5361
5362
5363/**
5364 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
5365 * supporting 64-bit guests.
5366 *
5367 * @returns VBox status code.
5368 * @param fResume Whether to VMLAUNCH or VMRESUME.
5369 * @param pCtx Pointer to the guest-CPU context.
5370 * @param pCache Pointer to the VMCS cache.
5371 * @param pVM The cross context VM structure.
5372 * @param pVCpu The cross context virtual CPU structure.
5373 */
5374DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5375{
5376 NOREF(fResume);
5377
5378 PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
5379 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;
5380
5381#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5382 pCache->uPos = 1;
5383 pCache->interPD = PGMGetInterPaeCR3(pVM);
5384 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5385#endif
5386
5387#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5388 pCache->TestIn.HCPhysCpuPage = 0;
5389 pCache->TestIn.HCPhysVmcs = 0;
5390 pCache->TestIn.pCache = 0;
5391 pCache->TestOut.HCPhysVmcs = 0;
5392 pCache->TestOut.pCache = 0;
5393 pCache->TestOut.pCtx = 0;
5394 pCache->TestOut.eflags = 0;
5395#else
5396 NOREF(pCache);
5397#endif
5398
5399 uint32_t aParam[10];
5400 aParam[0] = RT_LO_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5401 aParam[1] = RT_HI_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Hi. */
5402 aParam[2] = RT_LO_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
5403 aParam[3] = RT_HI_U32(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Hi. */
5404 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5405 aParam[5] = 0;
5406 aParam[6] = VM_RC_ADDR(pVM, pVM);
5407 aParam[7] = 0;
5408 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
5409 aParam[9] = 0;
5410
5411#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5412 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5413 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5414#endif
5415 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
5416
5417#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5418 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5419 Assert(pCtx->dr[4] == 10);
5420 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5421#endif
5422
5423#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
5424 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5425 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5426 pVCpu->hm.s.vmx.HCPhysVmcs));
5427 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
5428 pCache->TestOut.HCPhysVmcs));
5429 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5430 pCache->TestOut.pCache));
5431 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5432 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5433 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5434 pCache->TestOut.pCtx));
5435 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5436#endif
5437 NOREF(pCtx);
5438 return rc;
5439}
5440
5441
5442/**
5443 * Initializes the VMCS read-cache.
5444 *
5445 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
5446 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
5447 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
5448 * (those that have a 32-bit FULL & HIGH part).
5449 *
5450 * @returns VBox status code.
5451 * @param pVCpu The cross context virtual CPU structure.
5452 */
5453static int hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
5454{
5455#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
5456 do { \
5457 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
5458 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
5459 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
5460 ++cReadFields; \
5461 } while (0)
5462
5463 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5464 uint32_t cReadFields = 0;
5465
5466 /*
5467 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
5468 * and serve to indicate exceptions to the rules.
5469 */
5470
5471 /* Guest-natural selector base fields. */
5472#if 0
5473 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
5474 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
5475 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
5476#endif
5477 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
5478 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
5479 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
5480 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
5481 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
5482 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
5483 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
5484 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
5485 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
5486 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
5487 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
5488 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
5489#if 0
5490 /* Unused natural width guest-state fields. */
5491 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
5492 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
5493#endif
5494 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
5495 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
5496
5497 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for
5498 these 64-bit fields (using "FULL" and "HIGH" fields). */
5499#if 0
5500 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
5501 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
5502 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
5503 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
5504 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
5505 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
5506 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
5507 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
5508 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
5509#endif
5510
5511 /* Natural width guest-state fields. */
5512 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
5513#if 0
5514 /* Currently unused field. */
5515 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
5516#endif
5517
5518 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
5519 {
5520 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
5521 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
5522 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
5523 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
5524 }
5525 else
5526 {
5527 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
5528 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
5529 }
5530
5531#undef VMXLOCAL_INIT_READ_CACHE_FIELD
5532 return VINF_SUCCESS;
5533}
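
/*
 * Illustrative sketch only: for a single field such as VMX_VMCS_GUEST_RIP, the
 * VMXLOCAL_INIT_READ_CACHE_FIELD() invocation above expands to roughly the following
 * (the VMX_VMCS_GUEST_RIP_CACHE_IDX index comes from the ##_CACHE_IDX token paste in
 * the macro):
 *
 *     Assert(pCache->Read.aField[VMX_VMCS_GUEST_RIP_CACHE_IDX] == 0);
 *     pCache->Read.aField[VMX_VMCS_GUEST_RIP_CACHE_IDX]    = VMX_VMCS_GUEST_RIP;
 *     pCache->Read.aFieldVal[VMX_VMCS_GUEST_RIP_CACHE_IDX] = 0;
 *     ++cReadFields;
 */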
5534
5535
5536/**
5537 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
5538 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
5539 * darwin, running 64-bit guests).
5540 *
5541 * @returns VBox status code.
5542 * @param pVCpu The cross context virtual CPU structure.
5543 * @param idxField The VMCS field encoding.
5544 * @param u64Val 16, 32 or 64-bit value.
5545 */
5546VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5547{
5548 int rc;
5549 switch (idxField)
5550 {
5551 /*
5552         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
5553 */
5554 /* 64-bit Control fields. */
5555 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5556 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5557 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5558 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5559 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5560 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5561 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
5562 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5563 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5564 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5565 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
5566 case VMX_VMCS64_CTRL_EPTP_FULL:
5567 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
5568 /* 64-bit Guest-state fields. */
5569 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5570 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5571 case VMX_VMCS64_GUEST_PAT_FULL:
5572 case VMX_VMCS64_GUEST_EFER_FULL:
5573 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
5574 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5575 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5576 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5577 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5578 /* 64-bit Host-state fields. */
5579 case VMX_VMCS64_HOST_PAT_FULL:
5580 case VMX_VMCS64_HOST_EFER_FULL:
5581 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
5582 {
5583 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
5584 rc |= VMXWriteVmcs32(idxField + 1, RT_HI_U32(u64Val));
5585 break;
5586 }
5587
5588 /*
5589 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
5590         * values). These VMWRITEs then get executed when we switch the host to 64-bit mode for running 64-bit guests.
5591 */
5592 /* Natural-width Guest-state fields. */
5593 case VMX_VMCS_GUEST_CR3:
5594 case VMX_VMCS_GUEST_ES_BASE:
5595 case VMX_VMCS_GUEST_CS_BASE:
5596 case VMX_VMCS_GUEST_SS_BASE:
5597 case VMX_VMCS_GUEST_DS_BASE:
5598 case VMX_VMCS_GUEST_FS_BASE:
5599 case VMX_VMCS_GUEST_GS_BASE:
5600 case VMX_VMCS_GUEST_LDTR_BASE:
5601 case VMX_VMCS_GUEST_TR_BASE:
5602 case VMX_VMCS_GUEST_GDTR_BASE:
5603 case VMX_VMCS_GUEST_IDTR_BASE:
5604 case VMX_VMCS_GUEST_RSP:
5605 case VMX_VMCS_GUEST_RIP:
5606 case VMX_VMCS_GUEST_SYSENTER_ESP:
5607 case VMX_VMCS_GUEST_SYSENTER_EIP:
5608 {
5609 if (!(RT_HI_U32(u64Val)))
5610 {
5611 /* If this field is 64-bit, VT-x will zero out the top bits. */
5612 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
5613 }
5614 else
5615 {
5616 /* Assert that only the 32->64 switcher case should ever come here. */
5617 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
5618 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5619 }
5620 break;
5621 }
5622
5623 default:
5624 {
5625 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
5626 rc = VERR_INVALID_PARAMETER;
5627 break;
5628 }
5629 }
5630 AssertRCReturn(rc, rc);
5631 return rc;
5632}
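
/*
 * Illustrative sketch only: a 64-bit "FULL" field is written as two 32-bit VMWRITEs, the
 * "HIGH" half living at encoding idxField + 1, exactly as the switch above does. E.g. for
 * the guest EFER field:
 *
 *     rc  = VMXWriteVmcs32(VMX_VMCS64_GUEST_EFER_FULL,     RT_LO_U32(u64Val));
 *     rc |= VMXWriteVmcs32(VMX_VMCS64_GUEST_EFER_FULL + 1, RT_HI_U32(u64Val));
 *
 * A natural-width field such as VMX_VMCS_GUEST_FS_BASE with a value above 4 GiB is instead
 * queued via VMXWriteCachedVmcsEx() and flushed when the 64-on-32 switcher runs.
 */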
5633
5634
5635/**
5636 * Queues up a VMWRITE by using the VMCS write cache.
5637 * This is only used on 32-bit hosts (except darwin) for 64-bit guests.
5638 *
5639 * @param pVCpu The cross context virtual CPU structure.
5640 * @param idxField The VMCS field encoding.
5641 * @param u64Val 16, 32 or 64-bit value.
5642 */
5643VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5644{
5645 AssertPtr(pVCpu);
5646 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5647
5648 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5649 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5650
5651 /* Make sure there are no duplicates. */
5652 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
5653 {
5654 if (pCache->Write.aField[i] == idxField)
5655 {
5656 pCache->Write.aFieldVal[i] = u64Val;
5657 return VINF_SUCCESS;
5658 }
5659 }
5660
5661 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5662 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5663 pCache->Write.cValidEntries++;
5664 return VINF_SUCCESS;
5665}
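
/*
 * Usage sketch (illustrative, hypothetical values): queuing the same field twice simply
 * overwrites the cached value rather than adding a duplicate entry, so e.g.
 *
 *     VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_FS_BASE, UINT64_C(0x00000001fffff000));
 *     VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_FS_BASE, UINT64_C(0x00000002fffff000));
 *
 * leaves a single FS base entry holding the second value; cValidEntries grows by one only.
 */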
5666#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
5667
5668
5669/**
5670 * Sets up the usage of TSC-offsetting and updates the VMCS.
5671 *
5672 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
5673 * VMX preemption timer.
5674 *
5676 * @param pVCpu The cross context virtual CPU structure.
5677 *
5678 * @remarks No-long-jump zone!!!
5679 */
5680static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
5681{
5682 bool fOffsettedTsc;
5683 bool fParavirtTsc;
5684 PVM pVM = pVCpu->CTX_SUFF(pVM);
5685 uint64_t uTscOffset;
5686 if (pVM->hm.s.vmx.fUsePreemptTimer)
5687 {
5688 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
5689
5690 /* Make sure the returned values have sane upper and lower boundaries. */
5691 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
5692 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
5693 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
5694 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
5695
5696 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
5697 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
5698 AssertRC(rc);
5699 }
5700 else
5701 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
5702
5703 /** @todo later optimize this to be done elsewhere and not before every
5704 * VM-entry. */
5705 if (fParavirtTsc)
5706 {
5707 /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
5708 information before every VM-entry, hence disable it for performance sake. */
5709#if 0
5710 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
5711 AssertRC(rc);
5712#endif
5713 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
5714 }
5715
5716 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls;
5717 if ( fOffsettedTsc
5718 && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
5719 {
5720 if (pVCpu->hm.s.vmx.u64TscOffset != uTscOffset)
5721 {
5722 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
5723 AssertRC(rc);
5724 pVCpu->hm.s.vmx.u64TscOffset = uTscOffset;
5725 }
5726
5727 if (uProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)
5728 {
5729 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5730 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5731 AssertRC(rc);
5732 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
5733 }
5734 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
5735 }
5736 else
5737 {
5738 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
5739 if (!(uProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
5740 {
5741 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
5742 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5743 AssertRC(rc);
5744 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
5745 }
5746 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
5747 }
5748}
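
/*
 * Worked example (illustrative figures, not from the original source): with a 2 GHz host
 * TSC (u64CpuHz = 2,000,000,000) the deadline above is clamped into the range
 * [u64CpuHz / 2048, u64CpuHz / 64] = [976,562 .. 31,250,000] ticks, i.e. roughly 0.5 ms to
 * 15.6 ms, and only then shifted right by cPreemptTimerShift before being written as the
 * 32-bit VMX-preemption timer value.
 */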
5749
5750
5751/**
5752 * Gets the IEM exception flags for the specified vector and IDT vectoring /
5753 * VM-exit interruption info type.
5754 *
5755 * @returns The IEM exception flags.
5756 * @param uVector The event vector.
5757 * @param uVmxVectorType The VMX event type.
5758 *
5759 * @remarks This function currently only constructs flags required for
5760 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
5761 * and CR2 aspects of an exception are not included).
5762 */
5763static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxVectorType)
5764{
5765 uint32_t fIemXcptFlags;
5766 switch (uVmxVectorType)
5767 {
5768 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
5769 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
5770 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
5771 break;
5772
5773 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
5774 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
5775 break;
5776
5777 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
5778 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
5779 break;
5780
5781 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
5782 {
5783 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5784 if (uVector == X86_XCPT_BP)
5785 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
5786 else if (uVector == X86_XCPT_OF)
5787 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
5788 else
5789 {
5790 fIemXcptFlags = 0;
5791 AssertMsgFailed(("Unexpected vector for software int. uVector=%#x", uVector));
5792 }
5793 break;
5794 }
5795
5796 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
5797 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
5798 break;
5799
5800 default:
5801 fIemXcptFlags = 0;
5802 AssertMsgFailed(("Unexpected vector type! uVmxVectorType=%#x uVector=%#x", uVmxVectorType, uVector));
5803 break;
5804 }
5805 return fIemXcptFlags;
5806}
5807
5808
5809/**
5810 * Sets an event as a pending event to be injected into the guest.
5811 *
5812 * @param pVCpu The cross context virtual CPU structure.
5813 * @param u32IntInfo The VM-entry interruption-information field.
5814 * @param cbInstr The VM-entry instruction length in bytes (for software
5815 * interrupts, exceptions and privileged software
5816 * exceptions).
5817 * @param u32ErrCode The VM-entry exception error code.
5818 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
5819 * page-fault.
5820 *
5821 * @remarks Statistics counter assumes this is a guest event being injected or
5822 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
5823 * always incremented.
5824 */
5825DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
5826 RTGCUINTPTR GCPtrFaultAddress)
5827{
5828 Assert(!pVCpu->hm.s.Event.fPending);
5829 pVCpu->hm.s.Event.fPending = true;
5830 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
5831 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
5832 pVCpu->hm.s.Event.cbInstr = cbInstr;
5833 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
5834}
5835
5836
5837/**
5838 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
5839 *
5840 * @param pVCpu The cross context virtual CPU structure.
5841 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5842 * out-of-sync. Make sure to update the required fields
5843 * before using them.
5844 */
5845DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5846{
5847 NOREF(pMixedCtx);
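    /* #DF is delivered as a hardware exception and always pushes an error code (which is always zero). */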
5848 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
5849 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5850 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5851 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
5852}
5853
5854
5855/**
5856 * Handle a condition that occurred while delivering an event through the guest
5857 * IDT.
5858 *
5859 * @returns Strict VBox status code (i.e. informational status codes too).
5860 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
5861 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
5862 *          to continue execution of the guest which will deliver the \#DF.
5863 * @retval VINF_EM_RESET if we detected a triple-fault condition.
5864 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
5865 *
5866 * @param pVCpu The cross context virtual CPU structure.
5867 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5868 * out-of-sync. Make sure to update the required fields
5869 * before using them.
5870 * @param pVmxTransient Pointer to the VMX transient structure.
5871 *
5872 * @remarks No-long-jump zone!!!
5873 */
5874static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
5875{
5876 uint32_t const uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
5877
5878 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
5879 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
5880 AssertRCReturn(rc2, rc2);
5881
5882 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
5883 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
5884 {
5885 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
5886 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
5887
5888 /*
5889 * If the event was a software interrupt (generated with INT n) or a software exception
5890 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
5891 * can handle the VM-exit and continue guest execution which will re-execute the
5892 * instruction rather than re-injecting the exception, as that can cause premature
5893 * trips to ring-3 before injection and involve TRPM which currently has no way of
5894 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
5895 * the problem).
5896 */
5897 IEMXCPTRAISE enmRaise;
5898 IEMXCPTRAISEINFO fRaiseInfo;
5899 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5900 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5901 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
5902 {
5903 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
5904 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5905 }
5906 else if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
5907 {
5908 uint32_t const uExitVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uExitIntInfo);
5909 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
5910 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
5911 /** @todo Make AssertMsgReturn as just AssertMsg later. */
5912 AssertMsgReturn(uExitVectorType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT,
5913 ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. %#x!\n",
5914 uExitVectorType), VERR_VMX_IPE_5);
5915
5916 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
5917
5918 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
5919 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
5920 {
5921 pVmxTransient->fVectoringPF = true;
5922 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5923 }
5924 }
5925 else
5926 {
5927 /*
5928 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
5929 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
5930 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
5931 */
5932 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5933 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5934 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
5935 enmRaise = IEMXCPTRAISE_PREV_EVENT;
5936 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
5937 }
5938
5939 /*
5940 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
5941 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
5942 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
5943 * subsequent VM-entry would fail.
5944 *
5945 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
5946 */
5947 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)
5948 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
5949 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT
5950 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
5951 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
5952 {
5953 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
5954 }
5955
5956 switch (enmRaise)
5957 {
5958 case IEMXCPTRAISE_CURRENT_XCPT:
5959 {
5960 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
5961 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
5962 Assert(rcStrict == VINF_SUCCESS);
5963 break;
5964 }
5965
5966 case IEMXCPTRAISE_PREV_EVENT:
5967 {
5968 uint32_t u32ErrCode;
5969 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5970 {
5971 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5972 AssertRCReturn(rc2, rc2);
5973 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5974 }
5975 else
5976 u32ErrCode = 0;
5977
5978 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
5979 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
5980 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5981 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5982
5983 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
5984 pVCpu->hm.s.Event.u32ErrCode));
5985 Assert(rcStrict == VINF_SUCCESS);
5986 break;
5987 }
5988
5989 case IEMXCPTRAISE_REEXEC_INSTR:
5990 Assert(rcStrict == VINF_SUCCESS);
5991 break;
5992
5993 case IEMXCPTRAISE_DOUBLE_FAULT:
5994 {
5995 /*
5996                 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
5997                 * second #PF as a guest #PF (and not a shadow #PF), which then needs to be converted into a #DF.
5998 */
5999 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
6000 {
6001 pVmxTransient->fVectoringDoublePF = true;
6002 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
6003 pMixedCtx->cr2));
6004 rcStrict = VINF_SUCCESS;
6005 }
6006 else
6007 {
6008 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
6009 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
6010 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
6011 uIdtVector, uExitVector));
6012 rcStrict = VINF_HM_DOUBLE_FAULT;
6013 }
6014 break;
6015 }
6016
6017 case IEMXCPTRAISE_TRIPLE_FAULT:
6018 {
6019 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
6020 rcStrict = VINF_EM_RESET;
6021 break;
6022 }
6023
6024 case IEMXCPTRAISE_CPU_HANG:
6025 {
6026 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
6027 rcStrict = VERR_EM_GUEST_CPU_HANG;
6028 break;
6029 }
6030
6031 default:
6032 {
6033 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
6034 rcStrict = VERR_VMX_IPE_2;
6035 break;
6036 }
6037 }
6038 }
6039 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
6040 && VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
6041 && uExitVector != X86_XCPT_DF
6042 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI))
6043 {
6044 /*
6045         * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
6046 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
6047 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
6048 */
6049 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6050 {
6051 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
6052 VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
6053 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6054 }
6055 }
6056
6057 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
6058 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
6059 return rcStrict;
6060}
6061
6062
6063/**
6064 * Imports a guest segment register from the current VMCS into
6065 * the guest-CPU context.
6066 *
6067 * @returns VBox status code.
6068 * @param pVCpu The cross context virtual CPU structure.
6069 * @param idxSel Index of the selector in the VMCS.
6070 * @param idxLimit Index of the segment limit in the VMCS.
6071 * @param idxBase Index of the segment base in the VMCS.
6072 * @param idxAccess Index of the access rights of the segment in the VMCS.
6073 * @param pSelReg Pointer to the segment selector.
6074 *
6075 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
6076 * do not log!
6077 *
6078 * @remarks Never call this function directly!!! Use the
6079 * HMVMX_IMPORT_SREG() macro as that takes care
6080 * of whether to read from the VMCS cache or not.
6081 */
6082static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
6083 PCPUMSELREG pSelReg)
6084{
6085 NOREF(pVCpu);
6086
6087 uint32_t u32Sel;
6088 uint32_t u32Limit;
6089 uint32_t u32Attr;
6090 uint64_t u64Base;
6091 int rc = VMXReadVmcs32(idxSel, &u32Sel);
6092 rc |= VMXReadVmcs32(idxLimit, &u32Limit);
6093 rc |= VMXReadVmcs32(idxAccess, &u32Attr);
6094 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
6095 AssertRCReturn(rc, rc);
6096
6097 pSelReg->Sel = (uint16_t)u32Sel;
6098 pSelReg->ValidSel = (uint16_t)u32Sel;
6099 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6100 pSelReg->u32Limit = u32Limit;
6101 pSelReg->u64Base = u64Base;
6102 pSelReg->Attr.u = u32Attr;
6103
6104 /*
6105 * If VT-x marks the segment as unusable, most other bits remain undefined:
6106 * - For CS the L, D and G bits have meaning.
6107 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
6108 * - For the remaining data segments no bits are defined.
6109 *
6110     * The present bit and the unusable bit have been observed to be set at the
6111 * same time (the selector was supposed to be invalid as we started executing
6112 * a V8086 interrupt in ring-0).
6113 *
6114     * What should be important for the rest of the VBox code is that the P bit is
6115     * cleared.  Some of the other VBox code recognizes the unusable bit, but
6116     * AMD-V certainly doesn't, and REM doesn't really either.  So, to be on the
6117 * safe side here, we'll strip off P and other bits we don't care about. If
6118 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
6119 *
6120 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
6121 */
6122 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
6123 {
6124 Assert(idxSel != VMX_VMCS16_GUEST_TR_SEL); /* TR is the only selector that can never be unusable. */
6125
6126 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
6127 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
6128 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
6129#ifdef VBOX_STRICT
6130 VMMRZCallRing3Disable(pVCpu);
6131         Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Attr, pSelReg->Attr.u));
6132# ifdef DEBUG_bird
6133 AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u,
6134 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
6135                    idxSel, u32Attr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
6136# endif
6137 VMMRZCallRing3Enable(pVCpu);
6138#endif
6139 }
6140 return VINF_SUCCESS;
6141}
6142
6143
6144/**
6145 * Imports the guest RIP from the VMCS back into the guest-CPU context.
6146 *
6147 * @returns VBox status code.
6148 * @param pVCpu The cross context virtual CPU structure.
6149 *
6150 * @remarks Called with interrupts and/or preemption disabled, should not assert!
6151 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6152 * instead!!!
6153 */
6154DECLINLINE(int) hmR0VmxImportGuestRip(PVMCPU pVCpu)
6155{
6156 uint64_t u64Val;
6157 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6158 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
6159 {
6160 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
6161 if (RT_SUCCESS(rc))
6162 {
6163 pCtx->rip = u64Val;
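            /* Feed the updated RIP to the exit history; the flattened PC is supplied
               when CS is imported, see hmR0VmxImportGuestState(). */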
6164 EMR0HistoryUpdatePC(pVCpu, pCtx->rip, false);
6165 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
6166 }
6167 return rc;
6168 }
6169 return VINF_SUCCESS;
6170}
6171
6172
6173/**
6174 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
6175 *
6176 * @returns VBox status code.
6177 * @param pVCpu The cross context virtual CPU structure.
6178 *
6179 * @remarks Called with interrupts and/or preemption disabled, should not assert!
6180 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6181 * instead!!!
6182 */
6183DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu)
6184{
6185 uint32_t u32Val;
6186 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6187 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
6188 {
6189 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);
6190 if (RT_SUCCESS(rc))
6191 {
6192 pCtx->eflags.u32 = u32Val;
6193
6194 /* Restore eflags for real-on-v86-mode hack. */
6195 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6196 {
6197 pCtx->eflags.Bits.u1VM = 0;
6198 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
6199 }
6200 }
6201 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
6202 return rc;
6203 }
6204 return VINF_SUCCESS;
6205}
6206
6207
6208/**
6209 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
6210 * context.
6211 *
6212 * @returns VBox status code.
6213 * @param pVCpu The cross context virtual CPU structure.
6214 *
6215 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
6216 * do not log!
6217 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
6218 * instead!!!
6219 */
6220DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu)
6221{
6222 uint32_t u32Val;
6223 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6224 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val);
6225 if (RT_SUCCESS(rc))
6226 {
6227 /*
6228         * Depending on the interruptibility-state we additionally need to import RIP and
6229         * RFLAGS, as hmR0VmxEvaluatePendingEvent() might need them.
6230 */
6231 if (!u32Val)
6232 {
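            /* No interruptibility restrictions reported by the CPU: clear any stale
               interrupt-inhibition and block-by-NMI force-flags. */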
6233 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6234 {
6235 rc = hmR0VmxImportGuestRip(pVCpu);
6236 rc |= hmR0VmxImportGuestRFlags(pVCpu);
6237 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6238 }
6239
6240 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6241 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6242 }
6243 else
6244 {
6245 rc = hmR0VmxImportGuestRip(pVCpu);
6246 rc |= hmR0VmxImportGuestRFlags(pVCpu);
6247
6248 if (u32Val & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
6249 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))
6250 {
6251 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
6252 }
6253 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6254 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6255
6256 if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)
6257 {
6258 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6259 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6260 }
6261 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
6262 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6263 }
6264 }
6265 return rc;
6266}
6267
6268
6269/**
6270 * Worker for VMXR0ImportStateOnDemand.
6271 *
6272 * @returns VBox status code.
6273 * @param pVCpu The cross context virtual CPU structure.
6274 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
6275 */
6276static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat)
6277{
6278#define VMXLOCAL_BREAK_RC(a_rc) \
6279 if (RT_FAILURE(a_rc)) \
6280 break
6281
6282 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
6283
6284 int rc = VINF_SUCCESS;
6285 PVM pVM = pVCpu->CTX_SUFF(pVM);
6286 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6287 uint64_t u64Val;
6288 uint32_t u32Val;
6289
6290 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));
6291
6292 /*
6293 * We disable interrupts to make the updating of the state and in particular
6294     * the fExtrn modification atomic with respect to preemption hooks.
6295 */
6296 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
6297
6298 fWhat &= pCtx->fExtrn;
6299 if (fWhat)
6300 {
6301 do
6302 {
6303 if (fWhat & CPUMCTX_EXTRN_RIP)
6304 {
6305 rc = hmR0VmxImportGuestRip(pVCpu);
6306 VMXLOCAL_BREAK_RC(rc);
6307 }
6308
6309 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
6310 {
6311 rc = hmR0VmxImportGuestRFlags(pVCpu);
6312 VMXLOCAL_BREAK_RC(rc);
6313 }
6314
6315 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
6316 {
6317 rc = hmR0VmxImportGuestIntrState(pVCpu);
6318 VMXLOCAL_BREAK_RC(rc);
6319 }
6320
6321 if (fWhat & CPUMCTX_EXTRN_RSP)
6322 {
6323 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
6324 VMXLOCAL_BREAK_RC(rc);
6325 pCtx->rsp = u64Val;
6326 }
6327
6328 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
6329 {
6330 if (fWhat & CPUMCTX_EXTRN_CS)
6331 {
6332 rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs);
6333 rc |= hmR0VmxImportGuestRip(pVCpu);
6334 VMXLOCAL_BREAK_RC(rc);
6335 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6336 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
6337 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true);
6338 }
6339 if (fWhat & CPUMCTX_EXTRN_SS)
6340 {
6341 rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss);
6342 VMXLOCAL_BREAK_RC(rc);
6343 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6344 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
6345 }
6346 if (fWhat & CPUMCTX_EXTRN_DS)
6347 {
6348 rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds);
6349 VMXLOCAL_BREAK_RC(rc);
6350 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6351 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
6352 }
6353 if (fWhat & CPUMCTX_EXTRN_ES)
6354 {
6355 rc = HMVMX_IMPORT_SREG(ES, &pCtx->es);
6356 VMXLOCAL_BREAK_RC(rc);
6357 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6358 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
6359 }
6360 if (fWhat & CPUMCTX_EXTRN_FS)
6361 {
6362 rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs);
6363 VMXLOCAL_BREAK_RC(rc);
6364 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6365 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
6366 }
6367 if (fWhat & CPUMCTX_EXTRN_GS)
6368 {
6369 rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs);
6370 VMXLOCAL_BREAK_RC(rc);
6371 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6372 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
6373 }
6374 }
6375
6376 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
6377 {
6378 if (fWhat & CPUMCTX_EXTRN_LDTR)
6379 {
6380 rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr);
6381 VMXLOCAL_BREAK_RC(rc);
6382 }
6383
6384 if (fWhat & CPUMCTX_EXTRN_GDTR)
6385 {
6386 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
6387 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
6388 VMXLOCAL_BREAK_RC(rc);
6389 pCtx->gdtr.pGdt = u64Val;
6390 pCtx->gdtr.cbGdt = u32Val;
6391 }
6392
6393 /* Guest IDTR. */
6394 if (fWhat & CPUMCTX_EXTRN_IDTR)
6395 {
6396 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
6397 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
6398 VMXLOCAL_BREAK_RC(rc);
6399 pCtx->idtr.pIdt = u64Val;
6400 pCtx->idtr.cbIdt = u32Val;
6401 }
6402
6403 /* Guest TR. */
6404 if (fWhat & CPUMCTX_EXTRN_TR)
6405 {
6406 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */
6407 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
6408 {
6409 rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr);
6410 VMXLOCAL_BREAK_RC(rc);
6411 }
6412 }
6413 }
6414
6415 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
6416 {
6417 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
6418 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
6419 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);
6420 pCtx->SysEnter.cs = u32Val;
6421 VMXLOCAL_BREAK_RC(rc);
6422 }
6423
6424#if HC_ARCH_BITS == 64
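            /* If the lazily swapped MSRs currently hold the guest values, they can be
               read straight off the CPU. */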
6425 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
6426 {
6427 if ( pVM->hm.s.fAllow64BitGuests
6428 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
6429 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
6430 }
6431
6432 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
6433 {
6434 if ( pVM->hm.s.fAllow64BitGuests
6435 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
6436 {
6437 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
6438 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
6439 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
6440 }
6441 }
6442#endif
6443
6444 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
6445#if HC_ARCH_BITS == 32
6446 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
6447#endif
6448 )
6449 {
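                /* Sync back whatever the CPU stored in the auto-load/store guest MSR
                   area on VM-exit. */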
6450 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
6451 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;
6452 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
6453 {
6454 switch (pMsr->u32Msr)
6455 {
6456#if HC_ARCH_BITS == 32
6457 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break;
6458 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break;
6459 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break;
6460 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break;
6461#endif
6462 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;
6463 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break;
6464 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit */ break;
6465 default:
6466 {
6467 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
6468 ASMSetFlags(fEFlags);
6469 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
6470 cMsrs));
6471 return VERR_HM_UNEXPECTED_LD_ST_MSR;
6472 }
6473 }
6474 }
6475 }
6476
6477 if (fWhat & CPUMCTX_EXTRN_DR7)
6478 {
6479 if (!pVCpu->hm.s.fUsingHyperDR7)
6480 {
6481 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
6482 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
6483 VMXLOCAL_BREAK_RC(rc);
6484 pCtx->dr[7] = u32Val;
6485 }
6486 }
6487
6488 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
6489 {
6490 uint32_t u32Shadow;
6491 if (fWhat & CPUMCTX_EXTRN_CR0)
6492 {
6493 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
6494 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
6495 VMXLOCAL_BREAK_RC(rc);
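                    /* Bits set in the CR0 guest/host mask are owned by the host; for those
                       the guest sees the read-shadow value, so merge the shadow bits with
                       the rest of the VMCS guest CR0. */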
6496 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32Cr0Mask)
6497 | (u32Shadow & pVCpu->hm.s.vmx.u32Cr0Mask);
6498 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */
6499 CPUMSetGuestCR0(pVCpu, u32Val);
6500 VMMRZCallRing3Enable(pVCpu);
6501 }
6502
6503 if (fWhat & CPUMCTX_EXTRN_CR4)
6504 {
6505 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
6506 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
6507 VMXLOCAL_BREAK_RC(rc);
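                    /* Same merging as for CR0 above, using the CR4 guest/host mask and read shadow. */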
6508 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32Cr4Mask)
6509 | (u32Shadow & pVCpu->hm.s.vmx.u32Cr4Mask);
6510 CPUMSetGuestCR4(pVCpu, u32Val);
6511 }
6512
6513 if (fWhat & CPUMCTX_EXTRN_CR3)
6514 {
6515 /* CR0.PG bit changes are always intercepted, so it's up to date. */
6516 if ( pVM->hm.s.vmx.fUnrestrictedGuest
6517 || ( pVM->hm.s.fNestedPaging
6518 && CPUMIsGuestPagingEnabledEx(pCtx)))
6519 {
6520 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
6521 if (pCtx->cr3 != u64Val)
6522 {
6523 CPUMSetGuestCR3(pVCpu, u64Val);
6524 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
6525 }
6526
6527 /* If the guest is in PAE mode, sync back the PDPE's into the guest state.
6528 Note: CR4.PAE, CR0.PG, EFER bit changes are always intercepted, so they're up to date. */
6529 if (CPUMIsGuestInPAEModeEx(pCtx))
6530 {
6531 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
6532 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
6533 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
6534 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
6535 VMXLOCAL_BREAK_RC(rc);
6536 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
6537 }
6538 }
6539 }
6540 }
6541 } while (0);
6542
6543 if (RT_SUCCESS(rc))
6544 {
6545 /* Update fExtrn. */
6546 pCtx->fExtrn &= ~fWhat;
6547
6548 /* If everything has been imported, clear the HM keeper bit. */
6549 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
6550 {
6551 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
6552 Assert(!pCtx->fExtrn);
6553 }
6554 }
6555 }
6556 else
6557 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
6558
6559 ASMSetFlags(fEFlags);
6560
6561 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
6562
6563 /*
6564 * Honor any pending CR3 updates.
6565 *
6566 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
6567 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
6568 * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
6569 *
6570 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
6571 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
6572 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
6573 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
6574 *
6575 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
6576 */
6577 if (VMMRZCallRing3IsEnabled(pVCpu))
6578 {
6579 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
6580 {
6581 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
6582 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
6583 }
6584
6585 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
6586 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
6587
6588 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
6589 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
6590 }
6591
6592 return VINF_SUCCESS;
6593#undef VMXLOCAL_BREAK_RC
6594}
6595
6596
6597/**
6598 * Saves the guest state from the VMCS into the guest-CPU context.
6599 *
6600 * @returns VBox status code.
6601 * @param pVCpu The cross context virtual CPU structure.
6602 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
6603 */
6604VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
6605{
6606 return hmR0VmxImportGuestState(pVCpu, fWhat);
6607}
6608
6609
6610/**
6611 * Check per-VM and per-VCPU force flag actions that require us to go back to
6612 * ring-3 for one reason or another.
6613 *
6614 * @returns Strict VBox status code (i.e. informational status codes too)
6615 * @retval VINF_SUCCESS if we don't have any actions that require going back to
6616 * ring-3.
6617 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
6618 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
6619 * interrupts)
6620 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
6621 * all EMTs to be in ring-3.
6622 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
6623 * @retval VINF_EM_NO_MEMORY if PGM is out of memory and we need to return
6624 *         to the EM loop.
6625 *
6626 * @param pVCpu The cross context virtual CPU structure.
6627 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6628 * out-of-sync. Make sure to update the required fields
6629 * before using them.
6630 * @param fStepping Running in hmR0VmxRunGuestCodeStep().
6631 */
6632static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping)
6633{
6634 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6635
6636 /*
6637 * Anything pending? Should be more likely than not if we're doing a good job.
6638 */
6639 PVM pVM = pVCpu->CTX_SUFF(pVM);
6640 if ( !fStepping
6641 ? !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_MASK)
6642 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
6643 : !VM_FF_IS_PENDING(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
6644 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
6645 return VINF_SUCCESS;
6646
6647     /* Pending PGM CR3 sync. */
6648 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
6649 {
6650 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
6651 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
6652 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
6653 if (rcStrict2 != VINF_SUCCESS)
6654 {
6655 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
6656 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
6657 return rcStrict2;
6658 }
6659 }
6660
6661 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
6662 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
6663 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
6664 {
6665 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
6666 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
6667 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
6668 return rc2;
6669 }
6670
6671 /* Pending VM request packets, such as hardware interrupts. */
6672 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
6673 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
6674 {
6675 Log4Func(("Pending VM request forcing us back to ring-3\n"));
6676 return VINF_EM_PENDING_REQUEST;
6677 }
6678
6679 /* Pending PGM pool flushes. */
6680 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
6681 {
6682 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
6683 return VINF_PGM_POOL_FLUSH_PENDING;
6684 }
6685
6686 /* Pending DMA requests. */
6687 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
6688 {
6689 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
6690 return VINF_EM_RAW_TO_R3;
6691 }
6692
6693 return VINF_SUCCESS;
6694}
6695
6696
6697/**
6698 * Converts any TRPM trap into a pending HM event. This is typically used when
6699 * entering from ring-3 (not longjmp returns).
6700 *
6701 * @param pVCpu The cross context virtual CPU structure.
6702 */
6703static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
6704{
6705 Assert(TRPMHasTrap(pVCpu));
6706 Assert(!pVCpu->hm.s.Event.fPending);
6707
6708 uint8_t uVector;
6709 TRPMEVENT enmTrpmEvent;
6710 RTGCUINT uErrCode;
6711 RTGCUINTPTR GCPtrFaultAddress;
6712 uint8_t cbInstr;
6713
6714 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
6715 AssertRC(rc);
6716
6717 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
6718 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6719 if (enmTrpmEvent == TRPM_TRAP)
6720 {
6721 switch (uVector)
6722 {
6723 case X86_XCPT_NMI:
6724 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6725 break;
6726
6727 case X86_XCPT_BP:
6728 case X86_XCPT_OF:
6729 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6730 break;
6731
6732 case X86_XCPT_PF:
6733 case X86_XCPT_DF:
6734 case X86_XCPT_TS:
6735 case X86_XCPT_NP:
6736 case X86_XCPT_SS:
6737 case X86_XCPT_GP:
6738 case X86_XCPT_AC:
6739 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6740 RT_FALL_THRU();
6741 default:
6742 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6743 break;
6744 }
6745 }
6746 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
6747 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6748 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
6749 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6750 else
6751 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
6752
6753 rc = TRPMResetTrap(pVCpu);
6754 AssertRC(rc);
6755 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6756 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6757
6758 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6759}
6760
6761
6762/**
6763 * Converts the pending HM event into a TRPM trap.
6764 *
6765 * @param pVCpu The cross context virtual CPU structure.
6766 */
6767static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6768{
6769 Assert(pVCpu->hm.s.Event.fPending);
6770
6771 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
6772 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
6773 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntInfo);
6774 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6775
6776 /* If a trap was already pending, we did something wrong! */
6777 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6778
6779 TRPMEVENT enmTrapType;
6780 switch (uVectorType)
6781 {
6782 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6783 enmTrapType = TRPM_HARDWARE_INT;
6784 break;
6785
6786 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6787 enmTrapType = TRPM_SOFTWARE_INT;
6788 break;
6789
6790 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6791 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6792 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6793 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6794 enmTrapType = TRPM_TRAP;
6795 break;
6796
6797 default:
6798 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6799 enmTrapType = TRPM_32BIT_HACK;
6800 break;
6801 }
6802
6803 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6804
6805 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6806 AssertRC(rc);
6807
6808 if (fErrorCodeValid)
6809 TRPMSetErrorCode(pVCpu, uErrorCode);
6810
6811 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6812 && uVector == X86_XCPT_PF)
6813 {
6814 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6815 }
6816 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6817 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6818 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6819 {
6820 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6821 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6822 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6823 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6824 }
6825
6826 /* Clear any pending events from the VMCS. */
6827 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
6828 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0); AssertRC(rc);
6829
6830 /* We're now done converting the pending event. */
6831 pVCpu->hm.s.Event.fPending = false;
6832}
6833
6834
6835/**
6836 * Does the necessary state syncing before returning to ring-3 for any reason
6837 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6838 *
6839 * @returns VBox status code.
6840 * @param pVCpu The cross context virtual CPU structure.
6841 * @param fImportState Whether to import the guest state from the VMCS back
6842 * to the guest-CPU context.
6843 *
6844 * @remarks No-long-jmp zone!!!
6845 */
6846static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState)
6847{
6848 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6849 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6850
6851 RTCPUID idCpu = RTMpCpuId();
6852 Log4Func(("HostCpuId=%u\n", idCpu));
6853
6854 /*
6855 * !!! IMPORTANT !!!
6856 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
6857 */
6858
6859 /* Save the guest state if necessary. */
6860 if (fImportState)
6861 {
6862 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
6863 AssertRCReturn(rc, rc);
6864 }
6865
6866 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
6867 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
6868 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6869
6870 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
6871#ifdef VBOX_STRICT
6872 if (CPUMIsHyperDebugStateActive(pVCpu))
6873 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6874#endif
6875 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
6876 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
6877 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
6878
6879#if HC_ARCH_BITS == 64
6880 /* Restore host-state bits that VT-x only restores partially. */
6881 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
6882 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
6883 {
6884 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
6885 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6886 }
6887 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6888#endif
6889
6890 /* Restore the lazy host MSRs as we're leaving VT-x context. */
6891 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
6892 {
6893 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
6894 if (!fImportState)
6895 {
6896 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE
6897 | CPUMCTX_EXTRN_SYSCALL_MSRS);
6898 AssertRCReturn(rc, rc);
6899 }
6900 hmR0VmxLazyRestoreHostMsrs(pVCpu);
6901 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
6902 }
6903 else
6904 pVCpu->hm.s.vmx.fLazyMsrs = 0;
6905
6906     /* Update the auto-load/store host MSR values when we re-enter VT-x (as we could be on a different CPU). */
6907 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
6908
6909 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
6910 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
6911 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
6912 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
6913 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
6914 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
6915 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
6916 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
6917 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6918
6919 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
6920
6921 /** @todo This partially defeats the purpose of having preemption hooks.
6922 * The problem is, deregistering the hooks should be moved to a place that
6923     * lasts until the EMT is about to be destroyed, not done every time we leave
6924     * HM context.
6925 */
6926 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
6927 {
6928 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6929 AssertRCReturn(rc, rc);
6930
6931 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
6932 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
6933 }
6934 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
6935 NOREF(idCpu);
6936
6937 return VINF_SUCCESS;
6938}
6939
6940
6941/**
6942 * Leaves the VT-x session.
6943 *
6944 * @returns VBox status code.
6945 * @param pVCpu The cross context virtual CPU structure.
6946 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6947 * out-of-sync. Make sure to update the required fields
6948 * before using them.
6949 *
6950 * @remarks No-long-jmp zone!!!
6951 */
6952static int hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6953{
6954 HM_DISABLE_PREEMPT();
6955 HMVMX_ASSERT_CPU_SAFE();
6956 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6957 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6958
6959 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
6960        and already done it from the VMXR0ThreadCtxCallback(). */
6961 if (!pVCpu->hm.s.fLeaveDone)
6962 {
6963 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
6964 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
6965 pVCpu->hm.s.fLeaveDone = true;
6966 }
6967 Assert(!pMixedCtx->fExtrn); NOREF(pMixedCtx);
6968
6969 /*
6970 * !!! IMPORTANT !!!
6971 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
6972 */
6973
6974 /* Deregister hook now that we've left HM context before re-enabling preemption. */
6975 /** @todo Deregistering here means we need to VMCLEAR always
6976 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
6977 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
6978 VMMR0ThreadCtxHookDisable(pVCpu);
6979
6980 /* Leave HM context. This takes care of local init (term). */
6981 int rc = HMR0LeaveCpu(pVCpu);
6982
6983 HM_RESTORE_PREEMPT();
6984 return rc;
6985}
6986
6987
6988/**
6989 * Does the necessary state syncing before doing a longjmp to ring-3.
6990 *
6991 * @returns VBox status code.
6992 * @param pVCpu The cross context virtual CPU structure.
6993 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6994 * out-of-sync. Make sure to update the required fields
6995 * before using them.
6996 *
6997 * @remarks No-long-jmp zone!!!
6998 */
6999DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7000{
7001 return hmR0VmxLeaveSession(pVCpu, pMixedCtx);
7002}
7003
7004
7005/**
7006 * Take necessary actions before going back to ring-3.
7007 *
7008 * An action requires us to go back to ring-3. This function does the necessary
7009 * steps before we can safely return to ring-3. This is not the same as longjmps
7010 * to ring-3, this is voluntary and prepares the guest so it may continue
7011 * to ring-3; this is voluntary and prepares the guest so it may continue
7012 *
7013 * @returns VBox status code.
7014 * @param pVCpu The cross context virtual CPU structure.
7015 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7016 * out-of-sync. Make sure to update the required fields
7017 * before using them.
7018 * @param rcExit The reason for exiting to ring-3. Can be
7019 * VINF_VMM_UNKNOWN_RING3_CALL.
7020 */
7021static int hmR0VmxExitToRing3(PVMCPU pVCpu, PCPUMCTX pMixedCtx, VBOXSTRICTRC rcExit)
7022{
7023 Assert(pVCpu);
7024 Assert(pMixedCtx);
7025 HMVMX_ASSERT_PREEMPT_SAFE();
7026
7027 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
7028 {
7029 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
7030 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
7031 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
7032 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
7033 }
7034
7035     /* Please, no longjumps here (any logging that flushes could jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
7036 VMMRZCallRing3Disable(pVCpu);
7037 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
7038
7039 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
7040 if (pVCpu->hm.s.Event.fPending)
7041 {
7042 hmR0VmxPendingEventToTrpmTrap(pVCpu);
7043 Assert(!pVCpu->hm.s.Event.fPending);
7044 }
7045
7046 /* Clear interrupt-window and NMI-window controls as we re-evaluate it when we return from ring-3. */
7047 hmR0VmxClearIntNmiWindowsVmcs(pVCpu);
7048
7049 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
7050 and if we're injecting an event we should have a TRPM trap pending. */
7051 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7052#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
7053 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
7054#endif
7055
7056 /* Save guest state and restore host state bits. */
7057 int rc = hmR0VmxLeaveSession(pVCpu, pMixedCtx);
7058 AssertRCReturn(rc, rc);
7059 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
7060 /* Thread-context hooks are unregistered at this point!!! */
7061
7062 /* Sync recompiler state. */
7063 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
7064 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
7065 | CPUM_CHANGED_LDTR
7066 | CPUM_CHANGED_GDTR
7067 | CPUM_CHANGED_IDTR
7068 | CPUM_CHANGED_TR
7069 | CPUM_CHANGED_HIDDEN_SEL_REGS);
7070 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
7071 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
7072 {
7073 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
7074 }
7075
7076 Assert(!pVCpu->hm.s.fClearTrapFlag);
7077
7078 /* Update the exit-to-ring 3 reason. */
7079 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
7080
7081 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
7082 if (rcExit != VINF_EM_RAW_INTERRUPT)
7083 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
7084
7085 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
7086
7087 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
7088 VMMRZCallRing3RemoveNotification(pVCpu);
7089 VMMRZCallRing3Enable(pVCpu);
7090
7091 return rc;
7092}
7093
7094
7095/**
7096 * VMMRZCallRing3() callback wrapper which saves the guest state before we
7097 * longjump to ring-3 and possibly get preempted.
7098 *
7099 * @returns VBox status code.
7100 * @param pVCpu The cross context virtual CPU structure.
7101 * @param enmOperation The operation causing the ring-3 longjump.
7102 * @param pvUser Opaque pointer to the guest-CPU context. The data
7103 * may be out-of-sync. Make sure to update the required
7104 * fields before using them.
7105 */
7106static DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
7107{
7108 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
7109 {
7110 /*
7111 * !!! IMPORTANT !!!
7112         * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() need to be updated too.
7113 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
7114 */
7115 VMMRZCallRing3RemoveNotification(pVCpu);
7116 VMMRZCallRing3Disable(pVCpu);
7117 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
7118 RTThreadPreemptDisable(&PreemptState);
7119
7120 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
7121 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
7122 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
7123
7124#if HC_ARCH_BITS == 64
7125 /* Restore host-state bits that VT-x only restores partially. */
7126 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
7127 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
7128 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
7129 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
7130#endif
7131
7132 /* Restore the lazy host MSRs as we're leaving VT-x context. */
7133 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
7134 hmR0VmxLazyRestoreHostMsrs(pVCpu);
7135
7136         /* Update the auto-load/store host MSR values when we re-enter VT-x (as we could be on a different CPU). */
7137 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
7138 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
7139 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
7140 {
7141 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7142 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
7143 }
7144
7145 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
7146 VMMR0ThreadCtxHookDisable(pVCpu);
7147 HMR0LeaveCpu(pVCpu);
7148 RTThreadPreemptRestore(&PreemptState);
7149 return VINF_SUCCESS;
7150 }
7151
7152 Assert(pVCpu);
7153 Assert(pvUser);
7154 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7155 HMVMX_ASSERT_PREEMPT_SAFE();
7156
7157 VMMRZCallRing3Disable(pVCpu);
7158 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7159
7160 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
7161
7162 int rc = hmR0VmxLongJmpToRing3(pVCpu, (PCPUMCTX)pvUser);
7163 AssertRCReturn(rc, rc);
7164
7165 VMMRZCallRing3Enable(pVCpu);
7166 return VINF_SUCCESS;
7167}
7168
7169
7170/**
7171 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7172 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7173 *
7174 * @param pVCpu The cross context virtual CPU structure.
7175 */
7176DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
7177{
7178 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7179 {
7180 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
7181 {
7182 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7183 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7184 AssertRC(rc);
7185 Log4Func(("Setup interrupt-window exiting\n"));
7186 }
7187 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
7188}
7189
7190
7191/**
7192 * Clears the interrupt-window exiting control in the VMCS.
7193 *
7194 * @param pVCpu The cross context virtual CPU structure.
7195 */
7196DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMCPU pVCpu)
7197{
7198 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
7199 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
7200 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7201 AssertRC(rc);
7202 Log4Func(("Cleared interrupt-window exiting\n"));
7203}
7204
7205
7206/**
7207 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7208 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7209 *
7210 * @param pVCpu The cross context virtual CPU structure.
7211 */
7212DECLINLINE(void) hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu)
7213{
7214 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7215 {
7216 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT))
7217 {
7218 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7219 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7220 AssertRC(rc);
7221 Log4Func(("Setup NMI-window exiting\n"));
7222 }
7223 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7224}
7225
7226
7227/**
7228 * Clears the NMI-window exiting control in the VMCS.
7229 *
7230 * @param pVCpu The cross context virtual CPU structure.
7231 */
7232DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMCPU pVCpu)
7233{
7234 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT);
7235 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT;
7236 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
7237 AssertRC(rc);
7238 Log4Func(("Cleared NMI-window exiting\n"));
7239}
7240
7241
7242/**
7243 * Evaluates the event to be delivered to the guest and sets it as the pending
7244 * event.
7245 *
7246 * @returns The VT-x guest-interruptibility state.
7247 * @param pVCpu The cross context virtual CPU structure.
7248 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7249 * out-of-sync. Make sure to update the required fields
7250 * before using them.
7251 */
7252static uint32_t hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7253{
7254 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
7255 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
7256 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7257 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7258 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7259
7260 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
7261 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
7262 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7263 Assert(!TRPMHasTrap(pVCpu));
7264
7265 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
7266 APICUpdatePendingInterrupts(pVCpu);
7267
7268 /*
7269 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
7270 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
7271 */
7272 /** @todo SMI. SMIs take priority over NMIs. */
7273 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
7274 {
7275 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
7276 if ( !pVCpu->hm.s.Event.fPending
7277 && !fBlockNmi
7278 && !fBlockSti
7279 && !fBlockMovSS)
7280 {
7281 Log4Func(("Pending NMI\n"));
7282 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
7283 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7284
7285 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7286 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
7287 }
7288 else
7289 hmR0VmxSetNmiWindowExitVmcs(pVCpu);
7290 }
7291 /*
7292 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
7293     * a valid interrupt, we must deliver the interrupt. We can no longer re-request it from the APIC.
7294 */
7295 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
7296 && !pVCpu->hm.s.fSingleInstruction)
7297 {
7298 Assert(!DBGFIsStepping(pVCpu));
7299 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
7300 AssertRCReturn(rc, 0);
7301 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7302 if ( !pVCpu->hm.s.Event.fPending
7303 && !fBlockInt
7304 && !fBlockSti
7305 && !fBlockMovSS)
7306 {
7307 uint8_t u8Interrupt;
7308 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
7309 if (RT_SUCCESS(rc))
7310 {
7311 Log4Func(("Pending external interrupt u8Interrupt=%#x\n", u8Interrupt));
7312 uint32_t u32IntInfo = u8Interrupt
7313 | VMX_EXIT_INTERRUPTION_INFO_VALID
7314 | (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7315
7316                 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7317 }
7318 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
7319 {
7320 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7321 hmR0VmxApicSetTprThreshold(pVCpu, u8Interrupt >> 4);
7322 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
7323
7324 /*
7325 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
7326 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
7327 * need to re-set this force-flag here.
7328 */
7329 }
7330 else
7331 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
7332 }
7333 else
7334 hmR0VmxSetIntWindowExitVmcs(pVCpu);
7335 }
7336
7337 return fIntrState;
7338}
7339
7340
7341/**
7342 * Sets a pending-debug exception to be delivered to the guest if the guest is
7343 * single-stepping in the VMCS.
7344 *
7345 * @param pVCpu The cross context virtual CPU structure.
7346 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7347 * out-of-sync. Make sure to update the required fields
7348 * before using them.
7349 */
7350DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7351{
7352 RT_NOREF(pVCpu);
7353 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); NOREF(pMixedCtx);
7354 return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
7355}
7356
7357
7358/**
7359 * Injects any pending events into the guest if the guest is in a state to
7360 * receive them.
7361 *
7362 * @returns Strict VBox status code (i.e. informational status codes too).
7363 * @param pVCpu The cross context virtual CPU structure.
7364 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7365 * out-of-sync. Make sure to update the required fields
7366 * before using them.
7367 * @param fIntrState The VT-x guest-interruptibility state.
7368 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should
7369 * return VINF_EM_DBG_STEPPED if the event was
7370 * dispatched directly.
7371 */
7372static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t fIntrState, bool fStepping)
7373{
7374 HMVMX_ASSERT_PREEMPT_SAFE();
7375 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7376
7377 bool fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7378 bool fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7379
7380 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
7381    Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet. */
7382 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
7383 Assert(!TRPMHasTrap(pVCpu));
7384
7385 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
7386 if (pVCpu->hm.s.Event.fPending)
7387 {
7388 /*
7389 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
7390 * pending even while injecting an event and in this case, we want a VM-exit as soon as
7391 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
7392 *
7393 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
7394 */
7395 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7396#ifdef VBOX_STRICT
7397 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7398 {
7399 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
7400 Assert(!fBlockInt);
7401 Assert(!fBlockSti);
7402 Assert(!fBlockMovSS);
7403 }
7404 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
7405 {
7406 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);
7407 Assert(!fBlockSti);
7408 Assert(!fBlockMovSS);
7409 Assert(!fBlockNmi);
7410 }
7411#endif
7412 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
7413 uIntType));
7414 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
7415 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
7416 &fIntrState);
7417 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
7418
7419 /* Update the interruptibility-state as it could have been changed by
7420 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
7421 fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
7422 fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
7423
7424 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
7425 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
7426 else
7427 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
7428 }
7429
7430 /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
7431 if ( fBlockSti
7432 || fBlockMovSS)
7433 {
7434 if (!pVCpu->hm.s.fSingleInstruction)
7435 {
7436 /*
7437 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
7438 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
7439 * See Intel spec. 27.3.4 "Saving Non-Register State".
7440 */
7441 Assert(!DBGFIsStepping(pVCpu));
7442 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
7443 AssertRCReturn(rc, rc);
7444 if (pMixedCtx->eflags.Bits.u1TF)
7445 {
7446 int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
7447 AssertRCReturn(rc2, rc2);
7448 }
7449 }
7450 else if (pMixedCtx->eflags.Bits.u1TF)
7451 {
7452 /*
7453 * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
7454 * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
7455 */
7456 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG));
7457 fIntrState = 0;
7458 }
7459 }
7460
7461 /*
7462 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
7463 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7464 */
7465 int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState);
7466 AssertRCReturn(rc3, rc3);
7467
7468 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
7469 NOREF(fBlockMovSS); NOREF(fBlockSti);
7470 return rcStrict;
7471}
7472
7473
7474/**
7475 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
7476 *
7477 * @param pVCpu The cross context virtual CPU structure.
7478 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7479 * out-of-sync. Make sure to update the required fields
7480 * before using them.
7481 */
7482DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7483{
7484 NOREF(pMixedCtx);
7485 uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
7486 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7487}
7488
7489
7490/**
7491 * Injects a double-fault (\#DF) exception into the VM.
7492 *
7493 * @returns Strict VBox status code (i.e. informational status codes too).
7494 * @param pVCpu The cross context virtual CPU structure.
7495 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7496 * out-of-sync. Make sure to update the required fields
7497 * before using them.
7498 * @param fStepping Whether we're running in hmR0VmxRunGuestCodeStep()
7499 * and should return VINF_EM_DBG_STEPPED if the event
7500 * is injected directly (register modified by us, not
7501 * by hardware on VM-entry).
7502 * @param pfIntrState Pointer to the current guest interruptibility-state.
7503 * This interruptibility-state will be updated if
7504 *                          necessary. This cannot be NULL.
7505 */
7506DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fStepping, uint32_t *pfIntrState)
7507{
7508 NOREF(pMixedCtx);
7509 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7510 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7511 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7512 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping,
7513 pfIntrState);
7514}
7515
7516
7517/**
7518 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
7519 *
7520 * @param pVCpu The cross context virtual CPU structure.
7521 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7522 * out-of-sync. Make sure to update the required fields
7523 * before using them.
7524 */
7525DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7526{
7527 NOREF(pMixedCtx);
7528 uint32_t u32IntInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
7529 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7530 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7531}
7532
7533
7534/**
7535 * Sets an overflow (\#OF) exception as pending-for-injection into the VM.
7536 *
7537 * @param pVCpu The cross context virtual CPU structure.
7538 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7539 * out-of-sync. Make sure to update the required fields
7540 * before using them.
7541 * @param cbInstr        The instruction length in bytes (used to derive the
7542 *                      return RIP that is pushed on the guest stack).
7543 */
7544DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
7545{
7546 NOREF(pMixedCtx);
7547 uint32_t u32IntInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
7548 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7549 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7550}
7551
7552
7553/**
7554 * Injects a general-protection (\#GP) fault into the VM.
7555 *
7556 * @returns Strict VBox status code (i.e. informational status codes too).
7557 * @param pVCpu The cross context virtual CPU structure.
7558 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7559 * out-of-sync. Make sure to update the required fields
7560 * before using them.
7561 * @param fErrorCodeValid Whether the error code is valid (depends on the CPU
7562 * mode, i.e. in real-mode it's not valid).
7563 * @param u32ErrorCode The error code associated with the \#GP.
7564 * @param fStepping Whether we're running in
7565 * hmR0VmxRunGuestCodeStep() and should return
7566 * VINF_EM_DBG_STEPPED if the event is injected
7567 * directly (register modified by us, not by
7568 * hardware on VM-entry).
7569 * @param pfIntrState Pointer to the current guest interruptibility-state.
7570 * This interruptibility-state will be updated if
7571 *                          necessary. This cannot be NULL.
7572 */
7573DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
7574 bool fStepping, uint32_t *pfIntrState)
7575{
7576 NOREF(pMixedCtx);
7577 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7578 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7579 if (fErrorCodeValid)
7580 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7581 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping,
7582 pfIntrState);
7583}
7584
7585
7586#if 0 /* unused */
7587/**
7588 * Sets a general-protection (\#GP) exception as pending-for-injection into the
7589 * VM.
7590 *
7591 * @param pVCpu The cross context virtual CPU structure.
7592 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7593 * out-of-sync. Make sure to update the required fields
7594 * before using them.
7595 * @param u32ErrorCode The error code associated with the \#GP.
7596 */
7597DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t u32ErrorCode)
7598{
7599 NOREF(pMixedCtx);
7600 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
7601 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7602 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7603 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */);
7604}
7605#endif /* unused */
7606
7607
7608/**
7609 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
7610 *
7611 * @param pVCpu The cross context virtual CPU structure.
7612 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7613 * out-of-sync. Make sure to update the required fields
7614 * before using them.
7615 * @param uVector The software interrupt vector number.
7616 * @param cbInstr        The instruction length in bytes (used to derive the
7617 *                      return RIP that is pushed on the guest stack).
7618 */
7619DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
7620{
7621 NOREF(pMixedCtx);
7622 uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
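    /* INT3 (#BP) and INTO (#OF) are injected as software exceptions; all other INTn vectors as software interrupts. */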
7623 if ( uVector == X86_XCPT_BP
7624 || uVector == X86_XCPT_OF)
7625 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7626 else
7627 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
7628 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7629}
7630
7631
7632/**
7633 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
7634 * stack.
7635 *
7636 * @returns Strict VBox status code (i.e. informational status codes too).
7637 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
7638 * @param pVM The cross context VM structure.
7639 * @param pMixedCtx Pointer to the guest-CPU context.
7640 * @param uValue The value to push to the guest stack.
7641 */
7642DECLINLINE(VBOXSTRICTRC) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
7643{
7644 /*
7645 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
7646 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
7647 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
7648 */
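    /* With SP == 1 a 16-bit push would straddle the segment wrap-around; instead of emulating that corner
       case we treat it as a triple-fault condition and return VINF_EM_RESET. */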
7649 if (pMixedCtx->sp == 1)
7650 return VINF_EM_RESET;
7651 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
7652 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
7653 AssertRC(rc);
7654 return rc;
7655}
7656
7657
7658/**
7659 * Injects an event into the guest upon VM-entry by updating the relevant fields
7660 * in the VM-entry area in the VMCS.
7661 *
7662 * @returns Strict VBox status code (i.e. informational status codes too).
7663 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
7664 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
7665 *
7666 * @param pVCpu The cross context virtual CPU structure.
7667 * @param u64IntInfo The VM-entry interruption-information field.
7668 * @param cbInstr The VM-entry instruction length in bytes (for
7669 * software interrupts, exceptions and privileged
7670 * software exceptions).
7671 * @param u32ErrCode The VM-entry exception error code.
7672 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions.
7673 * @param pfIntrState Pointer to the current guest interruptibility-state.
7674 * This interruptibility-state will be updated if
7675 *                          necessary. This cannot be NULL.
7676 * @param fStepping Whether we're running in
7677 * hmR0VmxRunGuestCodeStep() and should return
7678 * VINF_EM_DBG_STEPPED if the event is injected
7679 * directly (register modified by us, not by
7680 * hardware on VM-entry).
7681 */
7682static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
7683 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState)
7684{
7685 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
7686 AssertMsg(!RT_HI_U32(u64IntInfo), ("%#RX64\n", u64IntInfo));
7687 Assert(pfIntrState);
7688
7689 PCPUMCTX pMixedCtx = &pVCpu->cpum.GstCtx;
7690 uint32_t u32IntInfo = (uint32_t)u64IntInfo;
7691 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo);
7692 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo);
7693
7694#ifdef VBOX_STRICT
7695 /*
7696 * Validate the error-code-valid bit for hardware exceptions.
7697 * No error codes for exceptions in real-mode.
7698 *
7699 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
7700 */
7701 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7702 && !CPUMIsGuestInRealModeEx(pMixedCtx))
7703 {
7704 switch (uVector)
7705 {
7706 case X86_XCPT_PF:
7707 case X86_XCPT_DF:
7708 case X86_XCPT_TS:
7709 case X86_XCPT_NP:
7710 case X86_XCPT_SS:
7711 case X86_XCPT_GP:
7712 case X86_XCPT_AC:
7713 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo),
7714 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
7715 RT_FALL_THRU();
7716 default:
7717 break;
7718 }
7719 }
7720#endif
7721
7722 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
7723 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7724 || !(*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
7725
7726 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
7727
7728 /*
7729 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
7730 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
7731 * interrupt handler in the (real-mode) guest.
7732 *
7733 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
7734 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
7735 */
7736 if (CPUMIsGuestInRealModeEx(pMixedCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
7737 {
7738 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
7739 {
7740 /*
7741 * For unrestricted execution enabled CPUs running real-mode guests, we must not
7742 * set the deliver-error-code bit.
7743 *
7744 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
7745 */
7746 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
7747 }
7748 else
7749 {
7750 PVM pVM = pVCpu->CTX_SUFF(pVM);
7751 Assert(PDMVmmDevHeapIsEnabled(pVM));
7752 Assert(pVM->hm.s.vmx.pRealModeTSS);
7753
7754             /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
7755 int rc2 = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK
7756 | CPUMCTX_EXTRN_TABLE_MASK
7757 | CPUMCTX_EXTRN_RIP
7758 | CPUMCTX_EXTRN_RSP
7759 | CPUMCTX_EXTRN_RFLAGS);
7760 AssertRCReturn(rc2, rc2);
7761
7762 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
7763 size_t const cbIdtEntry = sizeof(X86IDTR16);
7764 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
7765 {
7766 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
7767 if (uVector == X86_XCPT_DF)
7768 return VINF_EM_RESET;
7769
7770 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
7771 if (uVector == X86_XCPT_GP)
7772 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState);
7773
7774 /*
7775 * If we're injecting an event with no valid IDT entry, inject a #GP.
7776 * No error codes for exceptions in real-mode.
7777 *
7778 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
7779 */
7780 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, fStepping,
7781 pfIntrState);
7782 }
7783
7784 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
7785 uint16_t uGuestIp = pMixedCtx->ip;
7786 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
7787 {
7788 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7789 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
7790 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7791 }
7792 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
7793 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
7794
7795 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
7796 X86IDTR16 IdtEntry;
7797 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
7798 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
7799 AssertRCReturn(rc2, rc2);
7800
7801 /* Construct the stack frame for the interrupt/exception handler. */
7802 VBOXSTRICTRC rcStrict;
7803 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
7804 if (rcStrict == VINF_SUCCESS)
7805 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
7806 if (rcStrict == VINF_SUCCESS)
7807 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
7808
7809 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
7810 if (rcStrict == VINF_SUCCESS)
7811 {
7812 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
7813 pMixedCtx->rip = IdtEntry.offSel;
7814 pMixedCtx->cs.Sel = IdtEntry.uSel;
7815 pMixedCtx->cs.ValidSel = IdtEntry.uSel;
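                /* Real-mode code segment base = selector * 16; the shift works because cbIdtEntry (sizeof(X86IDTR16)) is 4. */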
7816 pMixedCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
7817 if ( uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7818 && uVector == X86_XCPT_PF)
7819 pMixedCtx->cr2 = GCPtrFaultAddress;
7820
7821 /* If any other guest-state bits are changed here, make sure to update
7822 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
7823 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS
7824 | HM_CHANGED_GUEST_CR2
7825 | HM_CHANGED_GUEST_RIP
7826 | HM_CHANGED_GUEST_RFLAGS
7827 | HM_CHANGED_GUEST_RSP);
7828
7829 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
7830 if (*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
7831 {
7832 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
7833 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
7834 Log4Func(("Clearing inhibition due to STI\n"));
7835 *pfIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
7836 }
7837 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
7838 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->eflags.u, pMixedCtx->cs.Sel, pMixedCtx->eip));
7839
7840 /* The event has been truly dispatched. Mark it as no longer pending so we don't attempt to 'undo'
7841 it, if we are returning to ring-3 before executing guest code. */
7842 pVCpu->hm.s.Event.fPending = false;
7843
7844 /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */
7845 if (fStepping)
7846 rcStrict = VINF_EM_DBG_STEPPED;
7847 }
7848 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
7849 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7850 return rcStrict;
7851 }
7852 }
7853
7854 /* Validate. */
7855 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
7856 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK_IRET(u32IntInfo)); /* Bit 12 MBZ. */
7857 Assert(!(u32IntInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
7858
7859 /* Inject. */
7860 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
7861 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo))
7862 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
7863 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
7864 AssertRCReturn(rc, rc);
7865
7866 /* Update CR2. */
7867 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
7868 && uVector == X86_XCPT_PF)
7869 pMixedCtx->cr2 = GCPtrFaultAddress;
7870
7871 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
7872
7873 return VINF_SUCCESS;
7874}
7875
7876
7877/**
7878 * Clears the interrupt-window and NMI-window exiting controls in the VMCS if
7879 * they are set.
7880 *
7882 * @param pVCpu The cross context virtual CPU structure.
7883 *
7884 * @remarks Use this function only to clear events that have not yet been
7885 * delivered to the guest but are injected in the VMCS!
7886 * @remarks No-long-jump zone!!!
7887 */
7888static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu)
7889{
7890 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT)
7891 {
7892 hmR0VmxClearIntWindowExitVmcs(pVCpu);
7893         Log4Func(("Cleared interrupt-window exiting\n"));
7894 }
7895
7896 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)
7897 {
7898 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
7899         Log4Func(("Cleared NMI-window exiting\n"));
7900 }
7901}
7902
7903
7904/**
7905 * Enters the VT-x session.
7906 *
7907 * @returns VBox status code.
7908 * @param pVCpu The cross context virtual CPU structure.
7909 * @param pHostCpu Pointer to the global CPU info struct.
7910 */
7911VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
7912{
7913 AssertPtr(pVCpu);
7914 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
7915 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7916 RT_NOREF(pHostCpu);
7917
7918 LogFlowFunc(("pVCpu=%p\n", pVCpu));
7919 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
7920 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
7921
7922#ifdef VBOX_STRICT
7923 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
7924 RTCCUINTREG uHostCR4 = ASMGetCR4();
7925 if (!(uHostCR4 & X86_CR4_VMXE))
7926 {
7927 LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
7928 return VERR_VMX_X86_CR4_VMXE_CLEARED;
7929 }
7930#endif
7931
7932 /*
7933 * Load the VCPU's VMCS as the current (and active) one.
7934 */
7935 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
7936 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
7937 if (RT_FAILURE(rc))
7938 return rc;
7939
7940 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
7941 pVCpu->hm.s.fLeaveDone = false;
7942 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
7943
7944 return VINF_SUCCESS;
7945}
7946
7947
7948/**
7949 * The thread-context callback (only on platforms which support it).
7950 *
7951 * @param enmEvent The thread-context event.
7952 * @param pVCpu The cross context virtual CPU structure.
7953 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
7954 * @thread EMT(pVCpu)
7955 */
7956VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
7957{
7958 NOREF(fGlobalInit);
7959
7960 switch (enmEvent)
7961 {
7962 case RTTHREADCTXEVENT_OUT:
7963 {
7964 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7965 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
7966 VMCPU_ASSERT_EMT(pVCpu);
7967
7968 /* No longjmps (logger flushes, locks) in this fragile context. */
7969 VMMRZCallRing3Disable(pVCpu);
7970 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
7971
7972 /*
7973 * Restore host-state (FPU, debug etc.)
7974 */
7975 if (!pVCpu->hm.s.fLeaveDone)
7976 {
7977 /*
7978 * Do -not- import the guest-state here as we might already be in the middle of importing
7979 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState().
7980 */
7981 hmR0VmxLeave(pVCpu, false /* fImportState */);
7982 pVCpu->hm.s.fLeaveDone = true;
7983 }
7984
7985 /* Leave HM context, takes care of local init (term). */
7986 int rc = HMR0LeaveCpu(pVCpu);
7987 AssertRC(rc); NOREF(rc);
7988
7989 /* Restore longjmp state. */
7990 VMMRZCallRing3Enable(pVCpu);
7991 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
7992 break;
7993 }
7994
7995 case RTTHREADCTXEVENT_IN:
7996 {
7997 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7998 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
7999 VMCPU_ASSERT_EMT(pVCpu);
8000
8001 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8002 VMMRZCallRing3Disable(pVCpu);
8003 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8004
8005 /* Initialize the bare minimum state required for HM. This takes care of
8006 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8007 int rc = hmR0EnterCpu(pVCpu);
8008 AssertRC(rc);
8009 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8010 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
8011
8012 /* Load the active VMCS as the current one. */
8013 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
8014 {
8015 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
8016 AssertRC(rc); NOREF(rc);
8017 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
8018 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8019 }
8020 pVCpu->hm.s.fLeaveDone = false;
8021
8022 /* Restore longjmp state. */
8023 VMMRZCallRing3Enable(pVCpu);
8024 break;
8025 }
8026
8027 default:
8028 break;
8029 }
8030}
8031
8032
8033/**
8034 * Exports the host state into the VMCS host-state area.
8035 * Sets up the VM-exit MSR-load area.
8036 *
8037 * The CPU state will be loaded from these fields on every successful VM-exit.
8038 *
8039 * @returns VBox status code.
8040 * @param pVCpu The cross context virtual CPU structure.
8041 *
8042 * @remarks No-long-jump zone!!!
8043 */
8044static int hmR0VmxExportHostState(PVMCPU pVCpu)
8045{
8046 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8047
8048 int rc = VINF_SUCCESS;
8049 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
8050 {
8051 rc = hmR0VmxExportHostControlRegs();
8052 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8053
8054 rc = hmR0VmxExportHostSegmentRegs(pVCpu);
8055 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8056
8057 rc = hmR0VmxExportHostMsrs(pVCpu);
8058 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8059
8060 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
8061 }
8062 return rc;
8063}
8064
8065
8066/**
8067 * Saves the host state in the VMCS host-state area.
8068 *
8069 * @returns VBox status code.
8070 * @param pVCpu The cross context virtual CPU structure.
8071 *
8072 * @remarks No-long-jump zone!!!
8073 */
8074VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu)
8075{
8076 AssertPtr(pVCpu);
8077 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8078
8079 /*
8080 * Export the host state here while entering HM context.
8081 * When thread-context hooks are used, we might get preempted and have to re-save the host
8082 * state but most of the time we won't be, so do it here before we disable interrupts.
8083 */
8084 return hmR0VmxExportHostState(pVCpu);
8085}
8086
8087
8088/**
8089 * Exports the guest state into the VMCS guest-state area.
8090 *
8091 * This will typically be done before VM-entry when the guest-CPU state and the
8092 * VMCS state may potentially be out of sync.
8093 *
8094 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
8095 * VM-entry controls.
8096 * Sets up the appropriate VMX non-root function to execute guest code based on
8097 * the guest CPU mode.
8098 *
8099 * @returns VBox strict status code.
8100 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
8101 * without unrestricted guest access and the VMMDev is not presently
8102 * mapped (e.g. EFI32).
8103 *
8104 * @param pVCpu The cross context virtual CPU structure.
8105 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8106 * out-of-sync. Make sure to update the required fields
8107 * before using them.
8108 *
8109 * @remarks No-long-jump zone!!!
8110 */
8111static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
8112{
8113 AssertPtr(pVCpu);
8114 AssertPtr(pMixedCtx);
8115 HMVMX_ASSERT_PREEMPT_SAFE();
8116
8117 LogFlowFunc(("pVCpu=%p\n", pVCpu));
8118
8119 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
8120
8121 /* Determine real-on-v86 mode. */
8122 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
8123 if ( !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
8124 && CPUMIsGuestInRealModeEx(pMixedCtx))
8125 {
8126 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
8127 }
8128
8129 /*
8130 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
8131 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
8132 */
8133 int rc = hmR0VmxSelectVMRunHandler(pVCpu, pMixedCtx);
8134 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8135
8136 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */
8137 rc = hmR0VmxExportGuestEntryCtls(pVCpu, pMixedCtx);
8138 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8139
8140 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */
8141 rc = hmR0VmxExportGuestExitCtls(pVCpu, pMixedCtx);
8142 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8143
8144 rc = hmR0VmxExportGuestCR0(pVCpu, pMixedCtx);
8145 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8146
8147 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pMixedCtx);
8148 if (rcStrict == VINF_SUCCESS)
8149 { /* likely */ }
8150 else
8151 {
8152 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
8153 return rcStrict;
8154 }
8155
8156 rc = hmR0VmxExportGuestSegmentRegs(pVCpu, pMixedCtx);
8157 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8158
8159 /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it
8160 may alter controls if we determine we don't have to swap EFER after all. */
8161 rc = hmR0VmxExportGuestMsrs(pVCpu, pMixedCtx);
8162 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8163
8164 rc = hmR0VmxExportGuestApicTpr(pVCpu);
8165 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8166
8167 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu);
8168 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8169
8170 /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is
8171 not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */
8172 rc = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
8173 rc |= hmR0VmxExportGuestRsp(pVCpu, pMixedCtx);
8174 rc |= hmR0VmxExportGuestRflags(pVCpu, pMixedCtx);
8175 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8176
8177 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
8178 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
8179 | HM_CHANGED_GUEST_CR2
8180 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
8181 | HM_CHANGED_GUEST_X87
8182 | HM_CHANGED_GUEST_SSE_AVX
8183 | HM_CHANGED_GUEST_OTHER_XSAVE
8184 | HM_CHANGED_GUEST_XCRx
8185 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
8186 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
8187 | HM_CHANGED_GUEST_TSC_AUX
8188 | HM_CHANGED_GUEST_OTHER_MSRS
8189 | HM_CHANGED_GUEST_HWVIRT
8190 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
8191
8192 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
8193 return rc;
8194}
8195
8196
8197/**
8198 * Exports the state shared between the host and guest into the VMCS.
8199 *
8200 * @param pVCpu The cross context virtual CPU structure.
8201 * @param pCtx Pointer to the guest-CPU context.
8202 *
8203 * @remarks No-long-jump zone!!!
8204 */
8205static void hmR0VmxExportSharedState(PVMCPU pVCpu, PCPUMCTX pCtx)
8206{
8207 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8208 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8209
8210 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
8211 {
8212 int rc = hmR0VmxExportSharedDebugState(pVCpu, pCtx);
8213 AssertRC(rc);
8214 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
8215
8216 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
8217 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
8218 {
8219 rc = hmR0VmxExportGuestRflags(pVCpu, pCtx);
8220 AssertRC(rc);
8221 }
8222 }
8223
8224 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
8225 {
8226 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
8227 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
8228 }
8229
8230 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
8231 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
8232}
8233
8234
8235/**
8236 * Worker for loading the guest-state bits in the inner VT-x execution loop.
8237 *
8238 * @returns Strict VBox status code (i.e. informational status codes too).
8239 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
8240 * without unrestricted guest access and the VMMDev is not presently
8241 * mapped (e.g. EFI32).
8242 *
8243 * @param pVCpu The cross context virtual CPU structure.
8244 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8245 * out-of-sync. Make sure to update the required fields
8246 * before using them.
8247 *
8248 * @remarks No-long-jump zone!!!
8249 */
8250static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PCCPUMCTX pMixedCtx)
8251{
8252 HMVMX_ASSERT_PREEMPT_SAFE();
8253 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8254 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8255
8256#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
8257 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8258#endif
8259
8260 /*
8261     * For many exits it's only RIP that changes, so we try to export it first
8262 * without going through a lot of change flag checks.
8263 */
8264 VBOXSTRICTRC rcStrict;
8265 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
8266 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
8267 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
8268 {
8269 rcStrict = hmR0VmxExportGuestRip(pVCpu, pMixedCtx);
8270 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8271 { /* likely */}
8272 else
8273 AssertMsgFailedReturn(("hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
8274 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
8275 }
8276 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8277 {
8278 rcStrict = hmR0VmxExportGuestState(pVCpu, pMixedCtx);
8279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8280 { /* likely */}
8281 else
8282 {
8283 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("hmR0VmxExportGuestState failed! rc=%Rrc\n",
8284 VBOXSTRICTRC_VAL(rcStrict)));
8285 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8286 return rcStrict;
8287 }
8288 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
8289 }
8290 else
8291 rcStrict = VINF_SUCCESS;
8292
8293#ifdef VBOX_STRICT
8294 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
8295 fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
8296 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
8297 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
8298 ("fCtxChanged=%#RX64\n", fCtxChanged));
8299#endif
8300 return rcStrict;
8301}
8302
8303
8304/**
8305 * Does the preparations before executing guest code in VT-x.
8306 *
8307 * This may cause longjmps to ring-3 and may even result in rescheduling to the
8308 * recompiler/IEM. We must be cautious about committing guest-state information
8309 * into the VMCS here, as we cannot assume that we will assuredly end up
8310 * executing the guest in VT-x mode.
8311 *
8312 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
8313 * the common-state (TRPM/forceflags), we must undo those changes so that the
8314 * recompiler/IEM can (and should) use them when it resumes guest execution.
8315 * Otherwise such operations must be done when we can no longer exit to ring-3.
8316 *
8317 * @returns Strict VBox status code (i.e. informational status codes too).
8318 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
8319 * have been disabled.
8320 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
8321 * double-fault into the guest.
8322 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
8323 * dispatched directly.
8324 * @retval VINF_* scheduling changes, we have to go back to ring-3.
8325 *
8326 * @param pVCpu The cross context virtual CPU structure.
8327 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8328 * out-of-sync. Make sure to update the required fields
8329 * before using them.
8330 * @param pVmxTransient Pointer to the VMX transient structure.
8331 * @param fStepping Set if called from hmR0VmxRunGuestCodeStep(). Makes
8332 * us ignore some of the reasons for returning to
8333 * ring-3, and return VINF_EM_DBG_STEPPED if event
8334 * dispatching took place.
8335 */
8336static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, bool fStepping)
8337{
8338 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8339
8340#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
8341 PGMRZDynMapFlushAutoSet(pVCpu);
8342#endif
8343
8344 /* Check force flag actions that might require us to go back to ring-3. */
8345 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pMixedCtx, fStepping);
8346 if (rcStrict == VINF_SUCCESS)
8347     { /* FFs don't get set all the time. */ }
8348 else
8349 return rcStrict;
8350
8351 /*
8352 * Setup the virtualized-APIC accesses.
8353 *
8354      * Note! This can cause longjumps to R3 due to the acquisition of the PGM lock
8355 * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}.
8356 *
8357 * This is the reason we do it here and not in hmR0VmxExportGuestState().
8358 */
8359 PVM pVM = pVCpu->CTX_SUFF(pVM);
8360 if ( !pVCpu->hm.s.vmx.u64MsrApicBase
8361 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
8362 && PDMHasApic(pVM))
8363 {
8364 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
8365 Assert(u64MsrApicBase);
8366 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
8367
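        /* Mask off the APIC-base MSR flag bits (global enable, BSP etc.) to get the physical base address of the APIC. */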
8368 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
8369
8370 /* Unalias any existing mapping. */
8371 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
8372 AssertRCReturn(rc, rc);
8373
8374         /* Map the HC APIC-access page in place of the MMIO page; this also updates the shadow page tables if necessary. */
8375 Log4Func(("Mapped HC APIC-access page at %#RGp\n", GCPhysApicBase));
8376 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
8377 AssertRCReturn(rc, rc);
8378
8379 /* Update the per-VCPU cache of the APIC base MSR. */
8380 pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
8381 }
8382
8383 if (TRPMHasTrap(pVCpu))
8384 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
8385 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
8386
8387 /*
8388 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
8389 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
8390 * also result in triple-faulting the VM.
8391 */
8392 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping);
8393 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8394 { /* likely */ }
8395 else
8396 {
8397 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8398 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8399 return rcStrict;
8400 }
8401
8402 /*
8403 * A longjump might result in importing CR3 even for VM-exits that don't necessarily
8404      * import CR3 themselves. We need to update them here since even the
8405      * hmR0VmxInjectPendingEvent() call above may lazily import guest-CPU state on demand,
8406      * causing the force-flags below to be set.
8407 */
8408 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
8409 {
8410 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
8411 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
8412 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
8413 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
8414 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
8415 }
8416 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
8417 {
8418 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
8419 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
8420 }
8421
8422 /*
8423 * No longjmps to ring-3 from this point on!!!
8424 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
8425 * This also disables flushing of the R0-logger instance (if any).
8426 */
8427 VMMRZCallRing3Disable(pVCpu);
8428
8429 /*
8430 * Export the guest state bits.
8431 *
8432 * We cannot perform longjmps while loading the guest state because we do not preserve the
8433 * host/guest state (although the VMCS will be preserved) across longjmps which can cause
8434 * CPU migration.
8435 *
8436 * If we are injecting events to a real-on-v86 mode guest, we will have to update
8437 * RIP and some segment registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
8438 * Hence, loading of the guest state needs to be done -after- injection of events.
8439 */
8440 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pMixedCtx);
8441 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8442 { /* likely */ }
8443 else
8444 {
8445 VMMRZCallRing3Enable(pVCpu);
8446 return rcStrict;
8447 }
8448
8449 /*
8450 * We disable interrupts so that we don't miss any interrupts that would flag preemption
8451 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
8452      * preemption disabled for a while. Since this is purely to aid the
8453      * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily re-enable and
8454      * disable interrupts on NT.
8455 *
8456      * We need to check for force-flags that could've possibly been altered since we last
8457 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
8458 * see @bugref{6398}).
8459 *
8460 * We also check a couple of other force-flags as a last opportunity to get the EMT back
8461 * to ring-3 before executing guest code.
8462 */
8463 pVmxTransient->fEFlags = ASMIntDisableFlags();
8464
8465 if ( ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
8466 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8467 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
8468 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
8469 {
8470 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
8471 {
8472 pVCpu->hm.s.Event.fPending = false;
8473
8474 /*
8475 * We've injected any pending events. This is really the point of no return (to ring-3).
8476 *
8477 * Note! The caller expects to continue with interrupts & longjmps disabled on successful
8478 * returns from this function, so don't enable them here.
8479 */
8480 return VINF_SUCCESS;
8481 }
8482
8483 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
8484 rcStrict = VINF_EM_RAW_INTERRUPT;
8485 }
8486 else
8487 {
8488 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8489 rcStrict = VINF_EM_RAW_TO_R3;
8490 }
8491
8492 ASMSetFlags(pVmxTransient->fEFlags);
8493 VMMRZCallRing3Enable(pVCpu);
8494
8495 return rcStrict;
8496}
8497
8498
8499/**
8500 * Prepares to run guest code in VT-x and we've committed to doing so. This
8501 * means there is no backing out to ring-3 or anywhere else at this
8502 * point.
8503 *
8504 * @param pVCpu The cross context virtual CPU structure.
8505 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
8506 * out-of-sync. Make sure to update the required fields
8507 * before using them.
8508 * @param pVmxTransient Pointer to the VMX transient structure.
8509 *
8510 * @remarks Called with preemption disabled.
8511 * @remarks No-long-jump zone!!!
8512 */
8513static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8514{
8515 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8516 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8517 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8518
8519 /*
8520 * Indicate start of guest execution and where poking EMT out of guest-context is recognized.
8521 */
8522 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8523 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
8524
8525 PVM pVM = pVCpu->CTX_SUFF(pVM);
8526 if (!CPUMIsGuestFPUStateActive(pVCpu))
8527 {
8528 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
8529 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
8530 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
8531 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
8532 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
8533 }
8534
8535 /*
8536      * Lazy-update of the host MSR values in the auto-load/store MSR area.
8537 */
8538 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs
8539 && pVCpu->hm.s.vmx.cMsrs > 0)
8540 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
8541
8542 /*
8543 * Re-save the host state bits as we may've been preempted (only happens when
8544      * thread-context hooks are used or when hmR0VmxSelectVMRunHandler() changes pfnStartVM).
8545 * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and
8546 * if we change the switcher back to 32-bit, we *must* save the 32-bit host state here.
8547 * See @bugref{8432}.
8548 */
8549 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
8550 {
8551 int rc = hmR0VmxExportHostState(pVCpu);
8552 AssertRC(rc);
8553 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState);
8554 }
8555 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
8556
8557 /*
8558 * Export the state shared between host and guest (FPU, debug, lazy MSRs).
8559 */
8560 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
8561 hmR0VmxExportSharedState(pVCpu, pMixedCtx);
8562 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
8563
8564 /* Store status of the shared guest-host state at the time of VM-entry. */
8565#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
8566 if (CPUMIsGuestInLongModeEx(pMixedCtx))
8567 {
8568 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
8569 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
8570 }
8571 else
8572#endif
8573 {
8574 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
8575 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
8576 }
8577
8578 /*
8579 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
8580 */
8581 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8582 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR];
8583
8584 PHMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();
8585 RTCPUID idCurrentCpu = pCpu->idCpu;
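    /* Refresh the TSC offset and the VMX-preemption timer if an update was requested or if we have been
       scheduled onto a different host CPU since the last VM-entry. */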
8586 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
8587 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
8588 {
8589 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
8590 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
8591 }
8592
8593 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
8594 hmR0VmxFlushTaggedTlb(pVCpu, pCpu); /* Invalidate the appropriate guest entries from the TLB. */
8595 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
8596 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
8597
8598 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
8599
8600 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
8601 to start executing. */
8602
8603 /*
8604 * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
8605 */
8606 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
8607 {
8608 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8609 {
8610 bool fMsrUpdated;
8611 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
8612 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */,
8613 &fMsrUpdated);
8614 AssertRC(rc2);
8615 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8616 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8617 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8618 }
8619 else
8620 {
8621 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
8622 Assert(!pVCpu->hm.s.vmx.cMsrs || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8623 }
8624 }
8625
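    /* When the guest is exposed to IBRS/IBPB, keep IA32_SPEC_CTRL in the auto load/store MSR area so the
       guest value is loaded on VM-entry and the host value is restored on VM-exit. */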
8626 if (pVM->cpum.ro.GuestFeatures.fIbrs)
8627 {
8628 bool fMsrUpdated;
8629 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
8630 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */,
8631 &fMsrUpdated);
8632 AssertRC(rc2);
8633 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs);
8634 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */
8635 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
8636 }
8637
8638#ifdef VBOX_STRICT
8639 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
8640 hmR0VmxCheckHostEferMsr(pVCpu);
8641 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
8642#endif
8643#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
8644 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
8645 {
8646 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
8647 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
8648 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
8649 }
8650#endif
8651}
8652
8653
8654/**
8655 * Performs some essential restoration of state after running guest code in
8656 * VT-x.
8657 *
8658 * @param pVCpu The cross context virtual CPU structure.
8659 * @param pVmxTransient Pointer to the VMX transient structure.
8660 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
8661 *
8662 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
8663 *
8664 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
8665 * unconditionally when it is safe to do so.
8666 */
8667static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
8668{
8669 uint64_t const uHostTsc = ASMReadTSC();
8670 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8671
8672 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
8673 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
8674 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
8675 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
8676 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
8677 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
8678
8679 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
8680 TMCpuTickSetLastSeen(pVCpu, uHostTsc + pVCpu->hm.s.vmx.u64TscOffset);
8681
8682 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
8683 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
8684 Assert(!ASMIntAreEnabled());
8685 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
8686
8687#if HC_ARCH_BITS == 64
8688 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
8689#endif
8690#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
8691 /* The 64-on-32 switcher maintains uVmcsState on its own and we need to leave it alone here. */
8692 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
8693 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8694#else
8695 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
8696#endif
8697#ifdef VBOX_STRICT
8698 hmR0VmxCheckHostEferMsr(pVCpu); /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
8699#endif
8700 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
8701
8702 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
8703 uint32_t uExitReason;
8704 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8705 rc |= hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
8706 AssertRC(rc);
8707 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
8708 pVmxTransient->fVMEntryFailed = VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uEntryIntInfo);
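    /* Note: a VM-entry that successfully injects an event clears the valid bit of the VM-entry
       interruption-information field, so a valid bit that is still set here is taken as the
       indication that the VM-entry itself failed. */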
8709
8710 if (rcVMRun == VINF_SUCCESS)
8711 {
8712 /*
8713 * Update the VM-exit history array here even if the VM-entry failed due to:
8714 * - Invalid guest state.
8715 * - MSR loading.
8716 * - Machine-check event.
8717 *
8718 * In any of the above cases we will still have a "valid" VM-exit reason
8719 * despite @a fVMEntryFailed being false.
8720 *
8721 * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".
8722 *
8723 * Note! We don't have CS or RIP at this point. Will probably address that later
8724 * by amending the history entry added here.
8725 */
8726 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
8727 UINT64_MAX, uHostTsc);
8728
8729 if (!pVmxTransient->fVMEntryFailed)
8730 {
8731 VMMRZCallRing3Enable(pVCpu);
8732
8733 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
8734 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
8735
8736#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
8737 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
8738 AssertRC(rc);
8739#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
8740 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS);
8741 AssertRC(rc);
8742#else
8743 /*
8744 * Import the guest-interruptibility state always as we need it while evaluating
8745 * injecting events on re-entry.
8746 *
8747 * We don't import CR0 (when Unrestricted guest execution is unavailable) despite
8748 * checking for real-mode while exporting the state because all bits that cause
8749 * mode changes wrt CR0 are intercepted.
8750 */
8751 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
8752 AssertRC(rc);
8753#endif
8754
8755 /*
8756 * Sync the TPR shadow with our APIC state.
8757 */
8758 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
8759 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR])
8760 {
8761 rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]);
8762 AssertRC(rc);
8763 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
8764 }
8765
8766 return;
8767 }
8768 }
8769 else
8770 {
8771 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
8772 }
8773
8774 VMMRZCallRing3Enable(pVCpu);
8775}
8776
8777
8778/**
8779 * Runs the guest code using VT-x the normal way.
8780 *
8781 * @returns VBox status code.
8782 * @param pVCpu The cross context virtual CPU structure.
8783 * @param pCtx Pointer to the guest-CPU context.
8784 *
8785 * @note Mostly the same as hmR0VmxRunGuestCodeStep().
8786 */
8787static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, PCPUMCTX pCtx)
8788{
8789 VMXTRANSIENT VmxTransient;
8790 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
8791 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
8792 uint32_t cLoops = 0;
8793
8794 for (;; cLoops++)
8795 {
8796 Assert(!HMR0SuspendPending());
8797 HMVMX_ASSERT_CPU_SAFE();
8798
8799        /* Preparatory work for running guest code; this may force us to return
8800 to ring-3. This bugger disables interrupts on VINF_SUCCESS! */
8801 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
8802 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, false /* fStepping */);
8803 if (rcStrict != VINF_SUCCESS)
8804 break;
8805
8806 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
8807 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
8808 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
8809
8810 /* Restore any residual host-state and save any bits shared between host
8811 and guest into the guest-CPU state. Re-enables interrupts! */
8812 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
8813
8814 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
8815 if (RT_SUCCESS(rcRun))
8816 { /* very likely */ }
8817 else
8818 {
8819 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
8820 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
8821 return rcRun;
8822 }
8823
8824 /* Profile the VM-exit. */
8825 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
8826 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
8827 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
8828 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
8829 HMVMX_START_EXIT_DISPATCH_PROF();
8830
8831 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
8832
8833 /* Handle the VM-exit. */
8834#ifdef HMVMX_USE_FUNCTION_TABLE
8835 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
8836#else
8837 rcStrict = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
8838#endif
8839 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
8840 if (rcStrict == VINF_SUCCESS)
8841 {
8842 if (cLoops <= pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
8843 continue; /* likely */
8844 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
8845 rcStrict = VINF_EM_RAW_INTERRUPT;
8846 }
8847 break;
8848 }
8849
8850 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
8851 return rcStrict;
8852}
8853
8854
8855
8856/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
8857 * probes.
8858 *
8859 * The following few functions and associated structure contain the bloat
8860 * necessary for providing detailed debug events and dtrace probes as well as
8861 * reliable host side single stepping. This works on the principle of
8862 * "subclassing" the normal execution loop and workers. We replace the loop
8863 * method completely and override selected helpers to add necessary adjustments
8864 * to their core operation.
8865 *
8866 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
8867 * any performance for debug and analysis features.
8868 *
8869 * @{
8870 */
8871
8872/**
8873 * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
8874 * the debug run loop.
8875 */
8876typedef struct VMXRUNDBGSTATE
8877{
8878 /** The RIP we started executing at. This is for detecting that we stepped. */
8879 uint64_t uRipStart;
8880 /** The CS we started executing with. */
8881 uint16_t uCsStart;
8882
8883 /** Whether we've actually modified the 1st execution control field. */
8884 bool fModifiedProcCtls : 1;
8885 /** Whether we've actually modified the 2nd execution control field. */
8886 bool fModifiedProcCtls2 : 1;
8887 /** Whether we've actually modified the exception bitmap. */
8888 bool fModifiedXcptBitmap : 1;
8889
8890    /** We desire the modified CR0 mask to be cleared. */
8891 bool fClearCr0Mask : 1;
8892    /** We desire the modified CR4 mask to be cleared. */
8893 bool fClearCr4Mask : 1;
8894 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
8895 uint32_t fCpe1Extra;
8896 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
8897 uint32_t fCpe1Unwanted;
8898 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
8899 uint32_t fCpe2Extra;
8900 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
8901 uint32_t bmXcptExtra;
8902 /** The sequence number of the Dtrace provider settings the state was
8903 * configured against. */
8904 uint32_t uDtraceSettingsSeqNo;
8905 /** VM-exits to check (one bit per VM-exit). */
8906 uint32_t bmExitsToCheck[3];
8907
8908 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
8909 uint32_t fProcCtlsInitial;
8910 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
8911 uint32_t fProcCtls2Initial;
8912 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
8913 uint32_t bmXcptInitial;
8914} VMXRUNDBGSTATE;
8915AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
8916typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
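/*
 * Rough usage sketch (editorial summary; see hmR0VmxRunGuestCodeDebug below for
 * the real sequence):
 *
 *     VMXRUNDBGSTATE DbgState;
 *     hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);                     - snapshot RIP/CS and the initial control values.
 *     hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);  - derive extra exits from DBGF/DTrace settings.
 *     for (;;)
 *     {
 *         hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState);              - push the extra controls into the VMCS.
 *         ... run the guest, filter the VM-exit via hmR0VmxRunDebugHandleExit() ...
 *     }
 *     hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);               - restore the original control values.
 */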
8917
8918
8919/**
8920 * Initializes the VMXRUNDBGSTATE structure.
8921 *
8922 * @param pVCpu The cross context virtual CPU structure of the
8923 * calling EMT.
8924 * @param pCtx The CPU register context to go with @a pVCpu.
8925 * @param pDbgState The structure to initialize.
8926 */
8927static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)
8928{
8929 pDbgState->uRipStart = pCtx->rip;
8930 pDbgState->uCsStart = pCtx->cs.Sel;
8931
8932 pDbgState->fModifiedProcCtls = false;
8933 pDbgState->fModifiedProcCtls2 = false;
8934 pDbgState->fModifiedXcptBitmap = false;
8935 pDbgState->fClearCr0Mask = false;
8936 pDbgState->fClearCr4Mask = false;
8937 pDbgState->fCpe1Extra = 0;
8938 pDbgState->fCpe1Unwanted = 0;
8939 pDbgState->fCpe2Extra = 0;
8940 pDbgState->bmXcptExtra = 0;
8941 pDbgState->fProcCtlsInitial = pVCpu->hm.s.vmx.u32ProcCtls;
8942 pDbgState->fProcCtls2Initial = pVCpu->hm.s.vmx.u32ProcCtls2;
8943 pDbgState->bmXcptInitial = pVCpu->hm.s.vmx.u32XcptBitmap;
8944}
8945
8946
8947/**
8948 * Updates the VMCS fields with changes requested by @a pDbgState.
8949 *
8950 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
8951 * immediately before executing guest code, i.e. when interrupts are disabled.
8952 * We don't check status codes here as we cannot easily assert or return in the
8953 * latter case.
8954 *
8955 * @param pVCpu The cross context virtual CPU structure.
8956 * @param pDbgState The debug state.
8957 */
8958static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)
8959{
8960 /*
8961 * Ensure desired flags in VMCS control fields are set.
8962 * (Ignoring write failure here, as we're committed and it's just debug extras.)
8963 *
8964 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
8965 * there should be no stale data in pCtx at this point.
8966 */
8967 if ( (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
8968 || (pVCpu->hm.s.vmx.u32ProcCtls & pDbgState->fCpe1Unwanted))
8969 {
8970 pVCpu->hm.s.vmx.u32ProcCtls |= pDbgState->fCpe1Extra;
8971 pVCpu->hm.s.vmx.u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
8972 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8973        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls));
8974 pDbgState->fModifiedProcCtls = true;
8975 }
8976
8977 if ((pVCpu->hm.s.vmx.u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
8978 {
8979 pVCpu->hm.s.vmx.u32ProcCtls2 |= pDbgState->fCpe2Extra;
8980 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVCpu->hm.s.vmx.u32ProcCtls2);
8981        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVCpu->hm.s.vmx.u32ProcCtls2));
8982 pDbgState->fModifiedProcCtls2 = true;
8983 }
8984
8985 if ((pVCpu->hm.s.vmx.u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
8986 {
8987 pVCpu->hm.s.vmx.u32XcptBitmap |= pDbgState->bmXcptExtra;
8988 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
8989        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVCpu->hm.s.vmx.u32XcptBitmap));
8990 pDbgState->fModifiedXcptBitmap = true;
8991 }
8992
8993 if (pDbgState->fClearCr0Mask && pVCpu->hm.s.vmx.u32Cr0Mask != 0)
8994 {
8995 pVCpu->hm.s.vmx.u32Cr0Mask = 0;
8996 VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, 0);
8997        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS_CTRL_CR0_MASK: 0\n"));
8998 }
8999
9000 if (pDbgState->fClearCr4Mask && pVCpu->hm.s.vmx.u32Cr4Mask != 0)
9001 {
9002 pVCpu->hm.s.vmx.u32Cr4Mask = 0;
9003 VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, 0);
9004        Log6(("hmR0VmxPreRunGuestDebugStateApply: VMX_VMCS_CTRL_CR4_MASK: 0\n"));
9005 }
9006}
9007
9008
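/**
 * Restores the VMCS control fields modified by hmR0VmxPreRunGuestDebugStateApply
 * to the values saved in @a pDbgState, so the regular execution loop does not
 * inherit the debug-only exits.
 *
 * @returns Strict VBox status code, i.e. @a rcStrict or a VMCS-write failure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pDbgState   The debug state.
 * @param   rcStrict    The status code to return unless a VMCS write fails.
 */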
9009static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)
9010{
9011 /*
9012     * Restore VM-execution control settings as we may not re-enter this function the
9013 * next time around.
9014 */
9015    /* We reload the initial value and trigger what recalculations we can the
9016 next time around. From the looks of things, that's all that's required atm. */
9017 if (pDbgState->fModifiedProcCtls)
9018 {
9019 if (!(pDbgState->fProcCtlsInitial & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
9020 pDbgState->fProcCtlsInitial |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
9021 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
9022 AssertRCReturn(rc2, rc2);
9023 pVCpu->hm.s.vmx.u32ProcCtls = pDbgState->fProcCtlsInitial;
9024 }
9025
9026 /* We're currently the only ones messing with this one, so just restore the
9027 cached value and reload the field. */
9028 if ( pDbgState->fModifiedProcCtls2
9029 && pVCpu->hm.s.vmx.u32ProcCtls2 != pDbgState->fProcCtls2Initial)
9030 {
9031 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
9032 AssertRCReturn(rc2, rc2);
9033 pVCpu->hm.s.vmx.u32ProcCtls2 = pDbgState->fProcCtls2Initial;
9034 }
9035
9036 /* If we've modified the exception bitmap, we restore it and trigger
9037 reloading and partial recalculation the next time around. */
9038 if (pDbgState->fModifiedXcptBitmap)
9039 pVCpu->hm.s.vmx.u32XcptBitmap = pDbgState->bmXcptInitial;
9040
9041 return rcStrict;
9042}
9043
9044
9045/**
9046 * Configures VM-exit controls for current DBGF and DTrace settings.
9047 *
9048 * This updates @a pDbgState and the VMCS execution control fields to reflect
9049 * the necessary VM-exits demanded by DBGF and DTrace.
9050 *
9051 * @param pVCpu The cross context virtual CPU structure.
9052 * @param pDbgState The debug state.
9053 * @param pVmxTransient Pointer to the VMX transient structure. May update
9054 * fUpdateTscOffsettingAndPreemptTimer.
9055 */
9056static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient)
9057{
9058 /*
9059 * Take down the dtrace serial number so we can spot changes.
9060 */
9061 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
9062 ASMCompilerBarrier();
9063
9064 /*
9065 * We'll rebuild most of the middle block of data members (holding the
9066 * current settings) as we go along here, so start by clearing it all.
9067 */
9068 pDbgState->bmXcptExtra = 0;
9069 pDbgState->fCpe1Extra = 0;
9070 pDbgState->fCpe1Unwanted = 0;
9071 pDbgState->fCpe2Extra = 0;
9072 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
9073 pDbgState->bmExitsToCheck[i] = 0;
9074
9075 /*
9076 * Software interrupts (INT XXh) - no idea how to trigger these...
9077 */
9078 PVM pVM = pVCpu->CTX_SUFF(pVM);
9079 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
9080 || VBOXVMM_INT_SOFTWARE_ENABLED())
9081 {
9082 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9083 }
9084
9085 /*
9086 * INT3 breakpoints - triggered by #BP exceptions.
9087 */
9088 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
9089 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
9090
9091 /*
9092 * Exception bitmap and XCPT events+probes.
9093 */
9094 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
9095 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
9096 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
9097
9098 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
9099 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
9100 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
9101 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
9102 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
9103 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
9104 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
9105 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
9106 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
9107 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
9108 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
9109 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
9110 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
9111 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
9112 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
9113 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
9114 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
9115 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
9116
9117 if (pDbgState->bmXcptExtra)
9118 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
9119
9120 /*
9121 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
9122 *
9123 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
9124 * So, when adding/changing/removing please don't forget to update it.
9125 *
9126     * Some of the macros are picking up local variables to save horizontal space
9127 * (being able to see it in a table is the lesser evil here).
9128 */
9129#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
9130 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
9131 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
9132#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
9133 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9134 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9135 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9136 } else do { } while (0)
9137#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
9138 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9139 { \
9140 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
9141 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9142 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9143 } else do { } while (0)
9144#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
9145 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9146 { \
9147 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
9148 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9149 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9150 } else do { } while (0)
9151#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
9152 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
9153 { \
9154 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
9155 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
9156 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
9157 } else do { } while (0)
9158
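    /*
     * Illustration (rough sketch, not compiled): a table line such as
     *     SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
     * expands (AssertCompile omitted) to roughly
     *     if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
     *         || VBOXVMM_INSTR_HALT_ENABLED())
     *     {
     *         pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT;
     *         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
     *     }
     * i.e. it both forces the corresponding VM-exit via the processor-based
     * execution controls and marks that exit for hmR0VmxHandleExitDtraceEvents.
     */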
9159 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
9160 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
9161 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
9162 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
9163 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
9164
9165 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
9166 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
9167 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
9168 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
9169 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT); /* paranoia */
9170 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
9171 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
9172 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
9173 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
9174 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
9175 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT);
9176 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
9177 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
9178 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
9179 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
9180 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
9181 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
9182 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
9183 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
9184 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
9185 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
9186 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
9187 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
9188 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
9189 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
9190 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
9191 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
9192 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
9193 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
9194 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
9195 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
9196 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
9197 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
9198 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
9199 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
9200 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
9201
9202 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
9203 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9204 {
9205 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0
9206 | CPUMCTX_EXTRN_CR4
9207 | CPUMCTX_EXTRN_APIC_TPR);
9208 AssertRC(rc);
9209
9210#if 0 /** @todo fix me */
9211 pDbgState->fClearCr0Mask = true;
9212 pDbgState->fClearCr4Mask = true;
9213#endif
9214 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
9215 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT;
9216 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
9217 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;
9218 pDbgState->fCpe1Unwanted |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* risky? */
9219 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
9220 require clearing here and in the loop if we start using it. */
9221 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
9222 }
9223 else
9224 {
9225 if (pDbgState->fClearCr0Mask)
9226 {
9227 pDbgState->fClearCr0Mask = false;
9228 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
9229 }
9230 if (pDbgState->fClearCr4Mask)
9231 {
9232 pDbgState->fClearCr4Mask = false;
9233 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
9234 }
9235 }
9236 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
9237 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
9238
9239 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
9240 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
9241 {
9242 /** @todo later, need to fix handler as it assumes this won't usually happen. */
9243 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
9244 }
9245 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
9246 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
9247
9248 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS); /* risky clearing this? */
9249 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
9250 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
9251 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
9252 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT); /* paranoia */
9253 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
9254 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT); /* paranoia */
9255 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
9256#if 0 /** @todo too slow, fix handler. */
9257 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT);
9258#endif
9259 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
9260
9261 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
9262 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
9263 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
9264 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
9265 {
9266 pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
9267 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XDTR_ACCESS);
9268 }
9269 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_XDTR_ACCESS);
9270 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_XDTR_ACCESS);
9271 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_XDTR_ACCESS);
9272 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_XDTR_ACCESS);
9273
9274 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
9275 || IS_EITHER_ENABLED(pVM, INSTR_STR)
9276 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
9277 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
9278 {
9279 pDbgState->fCpe2Extra |= VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT;
9280 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_TR_ACCESS);
9281 }
9282 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_TR_ACCESS);
9283 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_TR_ACCESS);
9284 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_TR_ACCESS);
9285 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_TR_ACCESS);
9286
9287 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
9288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
9289 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT);
9290 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
9291 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
9292 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
9293 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT);
9294 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
9295 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
9296 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
9297 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT);
9298 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
9299 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT);
9300 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
9301 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
9302 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
9303 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_VMCS_CTRL_PROC_EXEC2_RDSEED_EXIT);
9304 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
9305 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
9306    SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES,              VMX_EXIT_XSAVES);
9307 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
9308 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
9309
9310#undef IS_EITHER_ENABLED
9311#undef SET_ONLY_XBM_IF_EITHER_EN
9312#undef SET_CPE1_XBM_IF_EITHER_EN
9313#undef SET_CPEU_XBM_IF_EITHER_EN
9314#undef SET_CPE2_XBM_IF_EITHER_EN
9315
9316 /*
9317 * Sanitize the control stuff.
9318 */
9319 pDbgState->fCpe2Extra &= pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;
9320 if (pDbgState->fCpe2Extra)
9321 pDbgState->fCpe1Extra |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
9322 pDbgState->fCpe1Extra &= pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;
9323 pDbgState->fCpe1Unwanted &= ~pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;
9324 if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
9325 {
9326 pVCpu->hm.s.fDebugWantRdTscExit ^= true;
9327 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9328 }
9329
9330 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
9331 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
9332 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
9333 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
9334}
9335
9336
9337/**
9338 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
9339 * appropriate.
9340 *
9341 * The caller has checked the VM-exit against the
9342 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
9343 * already, so we don't have to do that either.
9344 *
9345 * @returns Strict VBox status code (i.e. informational status codes too).
9346 * @param pVCpu The cross context virtual CPU structure.
9347 * @param pMixedCtx Pointer to the guest-CPU context.
9348 * @param pVmxTransient Pointer to the VMX-transient structure.
9349 * @param uExitReason The VM-exit reason.
9350 *
9351 * @remarks The name of this function is displayed by dtrace, so keep it short
9352 * and to the point. No longer than 33 chars long, please.
9353 */
9354static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9355 uint32_t uExitReason)
9356{
9357 /*
9358 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
9359 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
9360 *
9361 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
9362 * does. Must add/change/remove both places. Same ordering, please.
9363 *
9364 * Added/removed events must also be reflected in the next section
9365 * where we dispatch dtrace events.
9366 */
9367 bool fDtrace1 = false;
9368 bool fDtrace2 = false;
9369 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
9370 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
9371 uint32_t uEventArg = 0;
9372#define SET_EXIT(a_EventSubName) \
9373 do { \
9374 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9375 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9376 } while (0)
9377#define SET_BOTH(a_EventSubName) \
9378 do { \
9379 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
9380 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
9381 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
9382 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
9383 } while (0)
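    /* Rough sketch of what the switch below does: e.g. "case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;"
       sets enmEvent1 = DBGFEVENT_INSTR_CPUID and enmEvent2 = DBGFEVENT_EXIT_CPUID, and samples
       fDtrace1 = VBOXVMM_INSTR_CPUID_ENABLED() and fDtrace2 = VBOXVMM_EXIT_CPUID_ENABLED(). */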
9384 switch (uExitReason)
9385 {
9386 case VMX_EXIT_MTF:
9387 return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
9388
9389 case VMX_EXIT_XCPT_OR_NMI:
9390 {
9391 uint8_t const idxVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
9392 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo))
9393 {
9394 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
9395 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT:
9396 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT:
9397 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
9398 {
9399 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uExitIntInfo))
9400 {
9401 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
9402 uEventArg = pVmxTransient->uExitIntErrorCode;
9403 }
9404 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
9405 switch (enmEvent1)
9406 {
9407 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
9408 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
9409 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
9410 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
9411 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
9412 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
9413 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
9414 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
9415 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
9416 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
9417 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
9418 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
9419 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
9420 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
9421 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
9422 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
9423 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
9424 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
9425 default: break;
9426 }
9427 }
9428 else
9429 AssertFailed();
9430 break;
9431
9432 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT:
9433 uEventArg = idxVector;
9434 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
9435 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
9436 break;
9437 }
9438 break;
9439 }
9440
9441 case VMX_EXIT_TRIPLE_FAULT:
9442 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
9443 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
9444 break;
9445 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
9446 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
9447 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
9448 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
9449 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
9450
9451 /* Instruction specific VM-exits: */
9452 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
9453 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
9454 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
9455 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
9456 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
9457 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
9458 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
9459 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
9460 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
9461 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
9462 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
9463 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
9464 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
9465 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
9466 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
9467 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
9468 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
9469 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
9470 case VMX_EXIT_MOV_CRX:
9471 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9472 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
9473 SET_BOTH(CRX_READ);
9474 else
9475 SET_BOTH(CRX_WRITE);
9476 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification);
9477 break;
9478 case VMX_EXIT_MOV_DRX:
9479 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9480 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification)
9481 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
9482 SET_BOTH(DRX_READ);
9483 else
9484 SET_BOTH(DRX_WRITE);
9485 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification);
9486 break;
9487 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
9488 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
9489 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
9490 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
9491 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
9492 case VMX_EXIT_XDTR_ACCESS:
9493 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9494 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_XDTR_INSINFO_INSTR_ID))
9495 {
9496 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
9497 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
9498 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
9499 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
9500 }
9501 break;
9502
9503 case VMX_EXIT_TR_ACCESS:
9504 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
9505 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_YYTR_INSINFO_INSTR_ID))
9506 {
9507 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
9508 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
9509 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
9510 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
9511 }
9512 break;
9513
9514 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
9515 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
9516 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
9517 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
9518 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
9519 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
9520 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
9521 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
9522 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
9523 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
9524 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
9525
9526 /* Events that aren't relevant at this point. */
9527 case VMX_EXIT_EXT_INT:
9528 case VMX_EXIT_INT_WINDOW:
9529 case VMX_EXIT_NMI_WINDOW:
9530 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9531 case VMX_EXIT_PREEMPT_TIMER:
9532 case VMX_EXIT_IO_INSTR:
9533 break;
9534
9535 /* Errors and unexpected events. */
9536 case VMX_EXIT_INIT_SIGNAL:
9537 case VMX_EXIT_SIPI:
9538 case VMX_EXIT_IO_SMI:
9539 case VMX_EXIT_SMI:
9540 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9541 case VMX_EXIT_ERR_MSR_LOAD:
9542 case VMX_EXIT_ERR_MACHINE_CHECK:
9543 break;
9544
9545 default:
9546 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
9547 break;
9548 }
9549#undef SET_BOTH
9550#undef SET_EXIT
9551
9552 /*
9553 * Dtrace tracepoints go first. We do them here at once so we don't
9554 * have to copy the guest state saving and stuff a few dozen times.
9555     * The downside is that we've got to repeat the switch, though this time
9556 * we use enmEvent since the probes are a subset of what DBGF does.
9557 */
9558 if (fDtrace1 || fDtrace2)
9559 {
9560 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9561 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
9562 switch (enmEvent1)
9563 {
9564 /** @todo consider which extra parameters would be helpful for each probe. */
9565 case DBGFEVENT_END: break;
9566 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pMixedCtx); break;
9567 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pMixedCtx, pMixedCtx->dr[6]); break;
9568 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pMixedCtx); break;
9569 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pMixedCtx); break;
9570 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pMixedCtx); break;
9571 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pMixedCtx); break;
9572 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pMixedCtx); break;
9573 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pMixedCtx); break;
9574 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pMixedCtx, uEventArg); break;
9575 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pMixedCtx, uEventArg); break;
9576 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pMixedCtx, uEventArg); break;
9577 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pMixedCtx, uEventArg); break;
9578 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pMixedCtx, uEventArg, pMixedCtx->cr2); break;
9579 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pMixedCtx); break;
9580 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pMixedCtx); break;
9581 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pMixedCtx); break;
9582 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pMixedCtx); break;
9583 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pMixedCtx, uEventArg); break;
9584 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9585 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
9586 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pMixedCtx); break;
9587 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pMixedCtx); break;
9588 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pMixedCtx); break;
9589 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pMixedCtx); break;
9590 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pMixedCtx); break;
9591 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pMixedCtx); break;
9592 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pMixedCtx); break;
9593 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9594 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9595 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9596 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9597 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
9598 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
9599 RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
9600 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pMixedCtx); break;
9601 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pMixedCtx); break;
9602 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pMixedCtx); break;
9603 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pMixedCtx); break;
9604 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pMixedCtx); break;
9605 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pMixedCtx); break;
9606 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pMixedCtx); break;
9607 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pMixedCtx); break;
9608 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pMixedCtx); break;
9609 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pMixedCtx); break;
9610 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pMixedCtx); break;
9611 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pMixedCtx); break;
9612 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pMixedCtx); break;
9613 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pMixedCtx); break;
9614 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pMixedCtx); break;
9615 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pMixedCtx); break;
9616 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pMixedCtx); break;
9617 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pMixedCtx); break;
9618 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pMixedCtx); break;
9619 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
9620 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
9621 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
9622 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pMixedCtx); break;
9623 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pMixedCtx); break;
9624 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pMixedCtx); break;
9625 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pMixedCtx); break;
9626 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pMixedCtx); break;
9627 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pMixedCtx); break;
9628 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pMixedCtx); break;
9629 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pMixedCtx); break;
9630 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pMixedCtx); break;
9631 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pMixedCtx); break;
9632 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
9633 }
9634 switch (enmEvent2)
9635 {
9636 /** @todo consider which extra parameters would be helpful for each probe. */
9637 case DBGFEVENT_END: break;
9638 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pMixedCtx); break;
9639 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pMixedCtx, pMixedCtx->eax, pMixedCtx->ecx); break;
9640 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pMixedCtx); break;
9641 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pMixedCtx); break;
9642 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pMixedCtx); break;
9643 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pMixedCtx); break;
9644 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pMixedCtx); break;
9645 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pMixedCtx); break;
9646 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pMixedCtx); break;
9647 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9648 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9649 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9650 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pMixedCtx, (uint8_t)uEventArg); break;
9651 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pMixedCtx, pMixedCtx->ecx); break;
9652 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pMixedCtx, pMixedCtx->ecx,
9653 RT_MAKE_U64(pMixedCtx->eax, pMixedCtx->edx)); break;
9654 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pMixedCtx); break;
9655 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pMixedCtx); break;
9656 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pMixedCtx); break;
9657 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pMixedCtx); break;
9658 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pMixedCtx); break;
9659 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pMixedCtx); break;
9660 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pMixedCtx); break;
9661 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pMixedCtx); break;
9662 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pMixedCtx); break;
9663 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pMixedCtx); break;
9664 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pMixedCtx); break;
9665 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pMixedCtx); break;
9666 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pMixedCtx); break;
9667 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pMixedCtx); break;
9668 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pMixedCtx); break;
9669 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pMixedCtx); break;
9670 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pMixedCtx); break;
9671 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pMixedCtx); break;
9672 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pMixedCtx); break;
9673 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pMixedCtx); break;
9674 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pMixedCtx); break;
9675 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pMixedCtx); break;
9676 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pMixedCtx); break;
9677 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pMixedCtx); break;
9678 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pMixedCtx); break;
9679 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pMixedCtx); break;
9680 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pMixedCtx); break;
9681 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pMixedCtx); break;
9682 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pMixedCtx); break;
9683 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pMixedCtx); break;
9684 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pMixedCtx); break;
9685 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pMixedCtx); break;
9686 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pMixedCtx); break;
9687 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pMixedCtx); break;
9688 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pMixedCtx); break;
9689 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pMixedCtx); break;
9690 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
9691 }
9692 }
9693
9694 /*
9695     * Fire off the DBGF event, if enabled (our check here is just a quick one,
9696 * the DBGF call will do a full check).
9697 *
9698 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
9699     * Note! If we have two events, we prioritize the first, i.e. the instruction
9700 * one, in order to avoid event nesting.
9701 */
9702 PVM pVM = pVCpu->CTX_SUFF(pVM);
9703 if ( enmEvent1 != DBGFEVENT_END
9704 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
9705 {
9706 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArg(pVM, pVCpu, enmEvent1, uEventArg, DBGFEVENTCTX_HM);
9707 if (rcStrict != VINF_SUCCESS)
9708 return rcStrict;
9709 }
9710 else if ( enmEvent2 != DBGFEVENT_END
9711 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
9712 {
9713 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArg(pVM, pVCpu, enmEvent2, uEventArg, DBGFEVENTCTX_HM);
9714 if (rcStrict != VINF_SUCCESS)
9715 return rcStrict;
9716 }
9717
9718 return VINF_SUCCESS;
9719}
9720
9721
9722/**
9723 * Single-stepping VM-exit filtering.
9724 *
9725 * This is preprocessing the VM-exits and deciding whether we've gotten far
9726 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
9727 * handling is performed.
9728 *
9729 * @returns Strict VBox status code (i.e. informational status codes too).
9730 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9731 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
9732 * out-of-sync. Make sure to update the required
9733 * fields before using them.
9734 * @param pVmxTransient Pointer to the VMX-transient structure.
9735 * @param uExitReason The VM-exit reason.
9736 * @param pDbgState The debug state.
9737 */
9738DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient,
9739 uint32_t uExitReason, PVMXRUNDBGSTATE pDbgState)
9740{
9741 /*
9742 * Expensive (saves context) generic dtrace VM-exit probe.
9743 */
9744 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
9745 { /* more likely */ }
9746 else
9747 {
9748 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9749 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
9750 AssertRC(rc);
9751 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification);
9752 }
9753
9754 /*
9755 * Check for host NMI, just to get that out of the way.
9756 */
9757 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
9758 { /* normally likely */ }
9759 else
9760 {
9761 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
9762 AssertRCReturn(rc2, rc2);
9763 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
9764 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
9765 return hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);
9766 }
9767
9768 /*
9769 * Check for single stepping event if we're stepping.
9770 */
9771 if (pVCpu->hm.s.fSingleInstruction)
9772 {
9773 switch (uExitReason)
9774 {
9775 case VMX_EXIT_MTF:
9776 return hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);
9777
9778 /* Various events: */
9779 case VMX_EXIT_XCPT_OR_NMI:
9780 case VMX_EXIT_EXT_INT:
9781 case VMX_EXIT_TRIPLE_FAULT:
9782 case VMX_EXIT_INT_WINDOW:
9783 case VMX_EXIT_NMI_WINDOW:
9784 case VMX_EXIT_TASK_SWITCH:
9785 case VMX_EXIT_TPR_BELOW_THRESHOLD:
9786 case VMX_EXIT_APIC_ACCESS:
9787 case VMX_EXIT_EPT_VIOLATION:
9788 case VMX_EXIT_EPT_MISCONFIG:
9789 case VMX_EXIT_PREEMPT_TIMER:
9790
9791 /* Instruction specific VM-exits: */
9792 case VMX_EXIT_CPUID:
9793 case VMX_EXIT_GETSEC:
9794 case VMX_EXIT_HLT:
9795 case VMX_EXIT_INVD:
9796 case VMX_EXIT_INVLPG:
9797 case VMX_EXIT_RDPMC:
9798 case VMX_EXIT_RDTSC:
9799 case VMX_EXIT_RSM:
9800 case VMX_EXIT_VMCALL:
9801 case VMX_EXIT_VMCLEAR:
9802 case VMX_EXIT_VMLAUNCH:
9803 case VMX_EXIT_VMPTRLD:
9804 case VMX_EXIT_VMPTRST:
9805 case VMX_EXIT_VMREAD:
9806 case VMX_EXIT_VMRESUME:
9807 case VMX_EXIT_VMWRITE:
9808 case VMX_EXIT_VMXOFF:
9809 case VMX_EXIT_VMXON:
9810 case VMX_EXIT_MOV_CRX:
9811 case VMX_EXIT_MOV_DRX:
9812 case VMX_EXIT_IO_INSTR:
9813 case VMX_EXIT_RDMSR:
9814 case VMX_EXIT_WRMSR:
9815 case VMX_EXIT_MWAIT:
9816 case VMX_EXIT_MONITOR:
9817 case VMX_EXIT_PAUSE:
9818 case VMX_EXIT_XDTR_ACCESS:
9819 case VMX_EXIT_TR_ACCESS:
9820 case VMX_EXIT_INVEPT:
9821 case VMX_EXIT_RDTSCP:
9822 case VMX_EXIT_INVVPID:
9823 case VMX_EXIT_WBINVD:
9824 case VMX_EXIT_XSETBV:
9825 case VMX_EXIT_RDRAND:
9826 case VMX_EXIT_INVPCID:
9827 case VMX_EXIT_VMFUNC:
9828 case VMX_EXIT_RDSEED:
9829 case VMX_EXIT_XSAVES:
9830 case VMX_EXIT_XRSTORS:
9831 {
9832 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
9833 | CPUMCTX_EXTRN_CS);
9834 AssertRCReturn(rc, rc);
9835 if ( pMixedCtx->rip != pDbgState->uRipStart
9836 || pMixedCtx->cs.Sel != pDbgState->uCsStart)
9837 return VINF_EM_DBG_STEPPED;
9838 break;
9839 }
9840
9841 /* Errors and unexpected events: */
9842 case VMX_EXIT_INIT_SIGNAL:
9843 case VMX_EXIT_SIPI:
9844 case VMX_EXIT_IO_SMI:
9845 case VMX_EXIT_SMI:
9846 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
9847 case VMX_EXIT_ERR_MSR_LOAD:
9848 case VMX_EXIT_ERR_MACHINE_CHECK:
9849 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
9850 break;
9851
9852 default:
9853 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
9854 break;
9855 }
9856 }
9857
9858 /*
9859 * Check for debugger event breakpoints and dtrace probes.
9860 */
9861 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
9862 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
9863 {
9864 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9865 if (rcStrict != VINF_SUCCESS)
9866 return rcStrict;
9867 }
9868
9869 /*
9870 * Normal processing.
9871 */
9872#ifdef HMVMX_USE_FUNCTION_TABLE
9873 return g_apfnVMExitHandlers[uExitReason](pVCpu, pMixedCtx, pVmxTransient);
9874#else
9875 return hmR0VmxHandleExit(pVCpu, pMixedCtx, pVmxTransient, uExitReason);
9876#endif
9877}
9878
9879
9880/**
9881 * Single steps guest code using VT-x.
9882 *
9883 * @returns Strict VBox status code (i.e. informational status codes too).
9884 * @param pVCpu The cross context virtual CPU structure.
9885 * @param pCtx Pointer to the guest-CPU context.
9886 *
9887 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
9888 */
9889static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, PCPUMCTX pCtx)
9890{
9891 VMXTRANSIENT VmxTransient;
9892 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
9893
9894 /* Set HMCPU indicators. */
9895 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
9896 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
9897 pVCpu->hm.s.fDebugWantRdTscExit = false;
9898 pVCpu->hm.s.fUsingDebugLoop = true;
9899
9900 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
9901 VMXRUNDBGSTATE DbgState;
9902 hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState);
9903 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
9904
9905 /*
9906 * The loop.
9907 */
9908 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
9909 for (uint32_t cLoops = 0; ; cLoops++)
9910 {
9911 Assert(!HMR0SuspendPending());
9912 HMVMX_ASSERT_CPU_SAFE();
9913 bool fStepping = pVCpu->hm.s.fSingleInstruction;
9914
9915 /*
9916 * Preparatory work for running guest code, this may force us to return
9917 * to ring-3. This bugger disables interrupts on VINF_SUCCESS!
9918 */
9919 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
 9920      hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState);    /* Set up execute controls the next two calls can respond to. */
9921 rcStrict = hmR0VmxPreRunGuest(pVCpu, pCtx, &VmxTransient, fStepping);
9922 if (rcStrict != VINF_SUCCESS)
9923 break;
9924
9925 hmR0VmxPreRunGuestCommitted(pVCpu, pCtx, &VmxTransient);
9926 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &DbgState); /* Override any obnoxious code in the above two calls. */
9927
9928 /*
9929 * Now we can run the guest code.
9930 */
9931 int rcRun = hmR0VmxRunGuest(pVCpu, pCtx);
9932
 9933      /* The guest-CPU context is now outdated; 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
9934
9935 /*
9936 * Restore any residual host-state and save any bits shared between host
9937 * and guest into the guest-CPU state. Re-enables interrupts!
9938 */
9939 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
9940
9941 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
9942 if (RT_SUCCESS(rcRun))
9943 { /* very likely */ }
9944 else
9945 {
9946 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
9947 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, pCtx, &VmxTransient);
9948 return rcRun;
9949 }
9950
9951 /* Profile the VM-exit. */
9952 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
9953 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
9954 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
9955 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
9956 HMVMX_START_EXIT_DISPATCH_PROF();
9957
9958 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, pCtx, VmxTransient.uExitReason);
9959
9960 /*
 9961       * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxRunDebugHandleExit().
9962 */
9963 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason, &DbgState);
9964 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
9965 if (rcStrict != VINF_SUCCESS)
9966 break;
9967 if (cLoops > pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops)
9968 {
9969 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
9970 rcStrict = VINF_EM_RAW_INTERRUPT;
9971 break;
9972 }
9973
9974 /*
 9975       * Stepping: Did the RIP change? If so, consider it a single step.
9976 * Otherwise, make sure one of the TFs gets set.
9977 */
9978 if (fStepping)
9979 {
9980 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
9981 | CPUMCTX_EXTRN_CS);
9982 AssertRC(rc);
9983 if ( pCtx->rip != DbgState.uRipStart
9984 || pCtx->cs.Sel != DbgState.uCsStart)
9985 {
9986 rcStrict = VINF_EM_DBG_STEPPED;
9987 break;
9988 }
9989 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
9990 }
9991
9992 /*
 9993       * Update when dtrace settings change (DBGF kicks us, so no need to check).
9994 */
9995 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
9996 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient);
9997 }
9998
9999 /*
10000 * Clear the X86_EFL_TF if necessary.
10001 */
10002 if (pVCpu->hm.s.fClearTrapFlag)
10003 {
10004 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
10005 AssertRC(rc);
10006 pVCpu->hm.s.fClearTrapFlag = false;
10007 pCtx->eflags.Bits.u1TF = 0;
10008 }
10009   /** @todo there seem to be issues with the resume flag when the monitor trap
10010    *        flag is pending without being used. Seen early in BIOS init when
10011    *        accessing the APIC page in protected mode. */
10012
10013 /*
10014 * Restore VM-exit control settings as we may not reenter this function the
10015 * next time around.
10016 */
10017 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &DbgState, rcStrict);
10018
10019 /* Restore HMCPU indicators. */
10020 pVCpu->hm.s.fUsingDebugLoop = false;
10021 pVCpu->hm.s.fDebugWantRdTscExit = false;
10022 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
10023
10024 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
10025 return rcStrict;
10026}
10027
10028
10029/** @} */
10030
10031
10032/**
10033 * Checks if any expensive dtrace probes are enabled and we should go to the
10034 * debug loop.
10035 *
10036 * @returns true if we should use debug loop, false if not.
10037 */
10038static bool hmR0VmxAnyExpensiveProbesEnabled(void)
10039{
10040    /* It's probably faster to OR the raw 32-bit counter variables together.
10041       Since the variables are in an array and the probes are next to one
10042       another (more or less), we have good locality.  So, better to read
10043       eight or nine cache lines every time and have only one conditional, than
10044       128+ conditionals, right? */
10045 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
10046 | VBOXVMM_XCPT_DE_ENABLED_RAW()
10047 | VBOXVMM_XCPT_DB_ENABLED_RAW()
10048 | VBOXVMM_XCPT_BP_ENABLED_RAW()
10049 | VBOXVMM_XCPT_OF_ENABLED_RAW()
10050 | VBOXVMM_XCPT_BR_ENABLED_RAW()
10051 | VBOXVMM_XCPT_UD_ENABLED_RAW()
10052 | VBOXVMM_XCPT_NM_ENABLED_RAW()
10053 | VBOXVMM_XCPT_DF_ENABLED_RAW()
10054 | VBOXVMM_XCPT_TS_ENABLED_RAW()
10055 | VBOXVMM_XCPT_NP_ENABLED_RAW()
10056 | VBOXVMM_XCPT_SS_ENABLED_RAW()
10057 | VBOXVMM_XCPT_GP_ENABLED_RAW()
10058 | VBOXVMM_XCPT_PF_ENABLED_RAW()
10059 | VBOXVMM_XCPT_MF_ENABLED_RAW()
10060 | VBOXVMM_XCPT_AC_ENABLED_RAW()
10061 | VBOXVMM_XCPT_XF_ENABLED_RAW()
10062 | VBOXVMM_XCPT_VE_ENABLED_RAW()
10063 | VBOXVMM_XCPT_SX_ENABLED_RAW()
10064 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
10065 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
10066 ) != 0
10067 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
10068 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
10069 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
10070 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
10071 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
10072 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
10073 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
10074 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
10075 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
10076 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
10077 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
10078 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
10079 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
10080 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
10081 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
10082 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
10083 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
10084 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
10085 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
10086 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
10087 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
10088 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
10089 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
10090 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
10091 | VBOXVMM_INSTR_STR_ENABLED_RAW()
10092 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
10093 | VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
10094 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
10095 | VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
10096 | VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
10097 | VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
10098 | VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
10099 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
10100 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED_RAW()
10101 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED_RAW()
10102 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED_RAW()
10103 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED_RAW()
10104 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED_RAW()
10105 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED_RAW()
10106 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED_RAW()
10107 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED_RAW()
10108 | VBOXVMM_INSTR_VMX_VMXON_ENABLED_RAW()
10109 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED_RAW()
10110 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED_RAW()
10111 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED_RAW()
10112 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED_RAW()
10113 ) != 0
10114 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
10115 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
10116 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
10117 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
10118 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
10119 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
10120 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
10121 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
10122 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
10123 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
10124 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
10125 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
10126 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
10127 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
10128 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
10129 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
10130 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
10131 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
10132 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
10133 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
10134 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
10135 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
10136 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
10137 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
10138 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
10139 | VBOXVMM_EXIT_STR_ENABLED_RAW()
10140 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
10141 | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
10142 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
10143 | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
10144 | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
10145 | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
10146 | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
10147 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
10148 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
10149 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
10150 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
10151 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
10152 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
10153 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
10154 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
10155 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
10156 | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
10157 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
10158 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
10159 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
10160 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
10161 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
10162 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
10163 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
10164 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
10165 ) != 0;
10166}
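/*
 * Note: VMXR0RunGuestCode() below consults this together with VBOXVMM_ANY_PROBES_ENABLED()
 * (and the stepping/int3-breakpoint checks) to decide between the normal and the debug run loops.
 */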
10167
10168
10169/**
10170 * Runs the guest code using VT-x.
10171 *
10172 * @returns Strict VBox status code (i.e. informational status codes too).
10173 * @param pVCpu The cross context virtual CPU structure.
10174 * @param pCtx Pointer to the guest-CPU context.
10175 */
10176VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu, PCPUMCTX pCtx)
10177{
10178 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10179 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
10180 HMVMX_ASSERT_PREEMPT_SAFE();
10181
10182 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
10183
10184 VBOXSTRICTRC rcStrict;
10185 if ( !pVCpu->hm.s.fUseDebugLoop
10186 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
10187 && !DBGFIsStepping(pVCpu)
10188 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
10189 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, pCtx);
10190 else
10191 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, pCtx);
10192
10193 if (rcStrict == VERR_EM_INTERPRETER)
10194 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
10195 else if (rcStrict == VINF_EM_RESET)
10196 rcStrict = VINF_EM_TRIPLE_FAULT;
10197
10198 int rc2 = hmR0VmxExitToRing3(pVCpu, pCtx, rcStrict);
10199 if (RT_FAILURE(rc2))
10200 {
10201 pVCpu->hm.s.u32HMError = (uint32_t)VBOXSTRICTRC_VAL(rcStrict);
10202 rcStrict = rc2;
10203 }
10204 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
10205 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
10206 return rcStrict;
10207}
10208
10209
10210#ifndef HMVMX_USE_FUNCTION_TABLE
10211DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
10212{
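/*
 * VMEXIT_CALL_RET: in DEBUG_ramshankar builds (and with a non-zero a_fSave) this imports
 * the entire guest state before calling the exit handler and marks all guest state as
 * changed afterwards; in all other builds it simply expands to the handler call.
 */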
10213#ifdef DEBUG_ramshankar
10214#define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
10215 do { \
10216 if (a_fSave != 0) \
10217 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \
10218 VBOXSTRICTRC rcStrict = a_CallExpr; \
10219 if (a_fSave != 0) \
10220 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
10221 return rcStrict; \
10222 } while (0)
10223#else
10224# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
10225#endif
10226 switch (rcReason)
10227 {
10228 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));
10229 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));
10230 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));
10231 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));
10232 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));
10233 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));
10234 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));
10235 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));
10236 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));
10237 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));
10238 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));
10239 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));
10240 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient));
10241 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));
10242 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));
10243 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));
10244 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));
10245 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));
10246 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));
10247 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));
10248 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));
10249 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));
10250 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));
10251 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));
10252 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));
10253 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));
10254 case VMX_EXIT_XDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
10255 case VMX_EXIT_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));
10256 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));
10257 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));
10258 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));
10259 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));
10260 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));
10261 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));
10262
10263 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient);
10264 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient);
10265 case VMX_EXIT_INIT_SIGNAL: return hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient);
10266 case VMX_EXIT_SIPI: return hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient);
10267 case VMX_EXIT_IO_SMI: return hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient);
10268 case VMX_EXIT_SMI: return hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient);
10269 case VMX_EXIT_ERR_MSR_LOAD: return hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient);
10270 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient);
10271 case VMX_EXIT_ERR_MACHINE_CHECK: return hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient);
10272
10273 case VMX_EXIT_VMCLEAR:
10274 case VMX_EXIT_VMLAUNCH:
10275 case VMX_EXIT_VMPTRLD:
10276 case VMX_EXIT_VMPTRST:
10277 case VMX_EXIT_VMREAD:
10278 case VMX_EXIT_VMRESUME:
10279 case VMX_EXIT_VMWRITE:
10280 case VMX_EXIT_VMXOFF:
10281 case VMX_EXIT_VMXON:
10282 case VMX_EXIT_INVEPT:
10283 case VMX_EXIT_INVVPID:
10284 case VMX_EXIT_VMFUNC:
10285 case VMX_EXIT_XSAVES:
10286 case VMX_EXIT_XRSTORS:
10287 return hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
10288
10289 case VMX_EXIT_ENCLS:
10290 case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */
10291 case VMX_EXIT_PML_FULL:
10292 default:
10293 return hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
10294 }
10295#undef VMEXIT_CALL_RET
10296}
10297#endif /* !HMVMX_USE_FUNCTION_TABLE */
10298
10299
10300#ifdef VBOX_STRICT
10301/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
10302# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
10303 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
10304
10305# define HMVMX_ASSERT_PREEMPT_CPUID() \
10306 do { \
10307 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
10308 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
10309 } while (0)
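/*
 * The pair above latches the current CPU id (or NIL_RTCPUID when preemption is enabled)
 * and later asserts that we are still on that same CPU, which should catch unexpected
 * migrations while inside the VM-exit handlers.
 */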
10310
10311# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
10312 do { \
10313 AssertPtr(pVCpu); \
10314 AssertPtr(pMixedCtx); \
10315 AssertPtr(pVmxTransient); \
10316 Assert(pVmxTransient->fVMEntryFailed == false); \
10317 Assert(ASMIntAreEnabled()); \
10318 HMVMX_ASSERT_PREEMPT_SAFE(); \
10319 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
10320 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
10321 HMVMX_ASSERT_PREEMPT_SAFE(); \
10322 if (VMMR0IsLogFlushDisabled(pVCpu)) \
10323 HMVMX_ASSERT_PREEMPT_CPUID(); \
10324 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10325 } while (0)
10326
10327# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
10328 do { \
10329 Log4Func(("\n")); \
10330 } while (0)
10331#else /* nonstrict builds: */
10332# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
10333 do { \
10334 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
10335 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
10336 } while (0)
10337# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
10338#endif
10339
10340
10341/**
10342 * Advances the guest RIP by the specified number of bytes.
10343 *
10344 * @param pVCpu The cross context virtual CPU structure.
10345 * @param   pMixedCtx   Pointer to the guest-CPU context.  The data may be
10346 * out-of-sync. Make sure to update the required fields
10347 * before using them.
10348 * @param cbInstr Number of bytes to advance the RIP by.
10349 *
10350 * @remarks No-long-jump zone!!!
10351 */
10352DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
10353{
10354 /* Advance the RIP. */
10355 pMixedCtx->rip += cbInstr;
10356 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
10357
10358 /* Update interrupt inhibition. */
10359 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
10360 && pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
10361 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
10362}
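/* Callers that do not already know the instruction length use hmR0VmxAdvanceGuestRip()
   below, which reads the exit instruction length and imports RIP/RFLAGS before calling this. */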
10363
10364
10365/**
10366 * Advances the guest RIP after reading it from the VMCS.
10367 *
10368 * @returns VBox status code, no informational status codes.
10369 * @param pVCpu The cross context virtual CPU structure.
10370 * @param   pMixedCtx   Pointer to the guest-CPU context.  The data may be
10371 * out-of-sync. Make sure to update the required fields
10372 * before using them.
10373 * @param pVmxTransient Pointer to the VMX transient structure.
10374 *
10375 * @remarks No-long-jump zone!!!
10376 */
10377static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10378{
10379 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
10380 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
10381 | CPUMCTX_EXTRN_RFLAGS);
10382 AssertRCReturn(rc, rc);
10383
10384 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, pVmxTransient->cbInstr);
10385
10386 /*
10387 * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
10388 * pending debug exception field as it takes care of priority of events.
10389 *
10390 * See Intel spec. 32.2.1 "Debug Exceptions".
10391 */
10392 if ( !pVCpu->hm.s.fSingleInstruction
10393 && pMixedCtx->eflags.Bits.u1TF)
10394 {
10395 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
10396 AssertRCReturn(rc, rc);
10397 }
10398
10399 return VINF_SUCCESS;
10400}
10401
10402
10403/**
10404 * Tries to determine what part of the guest-state VT-x has deemed invalid
10405 * and update error record fields accordingly.
10406 *
10407 * @return VMX_IGS_* return codes.
10408 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
10409 * wrong with the guest state.
10410 *
10411 * @param pVCpu The cross context virtual CPU structure.
10412 * @param pCtx Pointer to the guest-CPU state.
10413 *
10414 * @remarks This function assumes our cache of the VMCS controls
10415 *          is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
10416 */
10417static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCPUMCTX pCtx)
10418{
10419#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
10420#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
10421 uError = (err); \
10422 break; \
10423 } else do { } while (0)
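/*
 * Both helper macros record a VMX_IGS_* code in uError and break out of the
 * do { } while (0) block below, so the first failing check wins and ends up
 * in pVCpu->hm.s.u32HMError at the bottom of this function.
 */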
10424
10425 int rc;
10426 PVM pVM = pVCpu->CTX_SUFF(pVM);
10427 uint32_t uError = VMX_IGS_ERROR;
10428 uint32_t u32Val;
10429 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
10430
10431 do
10432 {
10433 /*
10434 * CR0.
10435 */
10436 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10437 uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
10438 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
10439 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
10440 if (fUnrestrictedGuest)
10441 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
10442
10443 uint32_t u32GuestCr0;
10444 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
10445 AssertRCBreak(rc);
10446 HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
10447 HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
10448 if ( !fUnrestrictedGuest
10449 && (u32GuestCr0 & X86_CR0_PG)
10450 && !(u32GuestCr0 & X86_CR0_PE))
10451 {
10452 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
10453 }
10454
10455 /*
10456 * CR4.
10457 */
10458 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10459 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
10460
10461 uint32_t u32GuestCr4;
10462 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
10463 AssertRCBreak(rc);
10464 HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
10465 HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
10466
10467 /*
10468 * IA32_DEBUGCTL MSR.
10469 */
10470 uint64_t u64Val;
10471 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
10472 AssertRCBreak(rc);
10473 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
10474 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
10475 {
10476 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
10477 }
10478 uint64_t u64DebugCtlMsr = u64Val;
10479
10480#ifdef VBOX_STRICT
10481 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
10482 AssertRCBreak(rc);
10483 Assert(u32Val == pVCpu->hm.s.vmx.u32EntryCtls);
10484#endif
10485 bool const fLongModeGuest = RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
10486
10487 /*
10488 * RIP and RFLAGS.
10489 */
10490 uint32_t u32Eflags;
10491#if HC_ARCH_BITS == 64
10492 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
10493 AssertRCBreak(rc);
10494        /* pCtx->rip can differ from the value in the VMCS (e.g. after running guest code and VM-exits that don't update it). */
10495 if ( !fLongModeGuest
10496 || !pCtx->cs.Attr.n.u1Long)
10497 {
10498 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
10499 }
10500 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
10501 * must be identical if the "IA-32e mode guest" VM-entry
10502 * control is 1 and CS.L is 1. No check applies if the
10503 * CPU supports 64 linear-address bits. */
10504
10505 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
10506 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
10507 AssertRCBreak(rc);
10508 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
10509 VMX_IGS_RFLAGS_RESERVED);
10510 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10511 u32Eflags = u64Val;
10512#else
10513 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
10514 AssertRCBreak(rc);
10515 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
10516 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
10517#endif
10518
10519 if ( fLongModeGuest
10520 || ( fUnrestrictedGuest
10521 && !(u32GuestCr0 & X86_CR0_PE)))
10522 {
10523 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
10524 }
10525
10526 uint32_t u32EntryInfo;
10527 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
10528 AssertRCBreak(rc);
10529 if ( VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
10530 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
10531 {
10532 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
10533 }
10534
10535 /*
10536 * 64-bit checks.
10537 */
10538#if HC_ARCH_BITS == 64
10539 if (fLongModeGuest)
10540 {
10541 HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
10542 HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
10543 }
10544
10545 if ( !fLongModeGuest
10546 && (u32GuestCr4 & X86_CR4_PCIDE))
10547 {
10548 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
10549 }
10550
10551 /** @todo CR3 field must be such that bits 63:52 and bits in the range
10552 * 51:32 beyond the processor's physical-address width are 0. */
10553
10554 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
10555 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
10556 {
10557 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
10558 }
10559
10560 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
10561 AssertRCBreak(rc);
10562 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
10563
10564 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
10565 AssertRCBreak(rc);
10566 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
10567#endif
10568
10569 /*
10570 * PERF_GLOBAL MSR.
10571 */
10572 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
10573 {
10574 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
10575 AssertRCBreak(rc);
10576 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
10577 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
10578 }
10579
10580 /*
10581 * PAT MSR.
10582 */
10583 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
10584 {
10585 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
10586 AssertRCBreak(rc);
10587 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
10588 for (unsigned i = 0; i < 8; i++)
10589 {
10590 uint8_t u8Val = (u64Val & 0xff);
10591 if ( u8Val != 0 /* UC */
10592 && u8Val != 1 /* WC */
10593 && u8Val != 4 /* WT */
10594 && u8Val != 5 /* WP */
10595 && u8Val != 6 /* WB */
10596 && u8Val != 7 /* UC- */)
10597 {
10598 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
10599 }
10600 u64Val >>= 8;
10601 }
10602 }
10603
10604 /*
10605 * EFER MSR.
10606 */
10607 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
10608 {
10609 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
10610 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
10611 AssertRCBreak(rc);
10612 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
10613 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
10614 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVCpu->hm.s.vmx.u32EntryCtls
10615 & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
10616 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
10617 HMVMX_CHECK_BREAK( fUnrestrictedGuest
10618 || !(u32GuestCr0 & X86_CR0_PG)
10619 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
10620 VMX_IGS_EFER_LMA_LME_MISMATCH);
10621 }
10622
10623 /*
10624 * Segment registers.
10625 */
10626 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10627 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
10628 if (!(u32Eflags & X86_EFL_VM))
10629 {
10630 /* CS */
10631 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
10632 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
10633 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
10634 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
10635 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10636 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
10637 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
10638 /* CS cannot be loaded with NULL in protected mode. */
10639 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
10640 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
10641 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
10642 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
10643 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
10644 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
10645 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
10646 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
10647 else
10648 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
10649
10650 /* SS */
10651 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10652 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
10653 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
10654 if ( !(pCtx->cr0 & X86_CR0_PE)
10655 || pCtx->cs.Attr.n.u4Type == 3)
10656 {
10657 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
10658 }
10659 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
10660 {
10661 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
10662 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
10663 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
10664 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
10665 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
10666 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10667 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
10668 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
10669 }
10670
10671        /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegmentReg(). */
10672 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
10673 {
10674 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
10675 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
10676 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10677 || pCtx->ds.Attr.n.u4Type > 11
10678 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10679 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
10680 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
10681 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
10682 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10683 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
10684 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
10685 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10686 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
10687 }
10688 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
10689 {
10690 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
10691 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
10692 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10693 || pCtx->es.Attr.n.u4Type > 11
10694 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
10695 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
10696 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
10697 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
10698 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10699 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
10700 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
10701 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10702 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
10703 }
10704 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
10705 {
10706 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
10707 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
10708 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10709 || pCtx->fs.Attr.n.u4Type > 11
10710 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
10711 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
10712 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
10713 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
10714 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10715 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
10716 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10717 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10718 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
10719 }
10720 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
10721 {
10722 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
10723 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
10724 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
10725 || pCtx->gs.Attr.n.u4Type > 11
10726 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
10727 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
10728 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
10729 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
10730 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10731 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
10732 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10733 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10734 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
10735 }
10736 /* 64-bit capable CPUs. */
10737#if HC_ARCH_BITS == 64
10738 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10739 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10740 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10741 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10742 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10743 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10744 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10745 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10746 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10747 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10748 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10749#endif
10750 }
10751 else
10752 {
10753 /* V86 mode checks. */
10754 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
10755 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10756 {
10757 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
10758 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
10759 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
10760 }
10761 else
10762 {
10763 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
10764 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
10765 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
10766 }
10767
10768 /* CS */
10769 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
10770 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
10771 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
10772 /* SS */
10773 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
10774 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
10775 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
10776 /* DS */
10777 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
10778 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
10779 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
10780 /* ES */
10781 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
10782 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
10783 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
10784 /* FS */
10785 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
10786 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
10787 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
10788 /* GS */
10789 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
10790 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
10791 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
10792 /* 64-bit capable CPUs. */
10793#if HC_ARCH_BITS == 64
10794 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10795 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10796 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10797 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10798 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10799 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10800 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10801 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10802 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10803 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10804 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10805#endif
10806 }
10807
10808 /*
10809 * TR.
10810 */
10811 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
10812 /* 64-bit capable CPUs. */
10813#if HC_ARCH_BITS == 64
10814 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
10815#endif
10816 if (fLongModeGuest)
10817 {
10818 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
10819 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
10820 }
10821 else
10822 {
10823 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
10824 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
10825 VMX_IGS_TR_ATTR_TYPE_INVALID);
10826 }
10827 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
10828 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
10829 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
10830 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
10831 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10832 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
10833 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10834 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
10835
10836 /*
10837 * GDTR and IDTR.
10838 */
10839#if HC_ARCH_BITS == 64
10840 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
10841 AssertRCBreak(rc);
10842 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
10843
10844 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
10845 AssertRCBreak(rc);
10846 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
10847#endif
10848
10849 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
10850 AssertRCBreak(rc);
10851 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10852
10853 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
10854 AssertRCBreak(rc);
10855 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10856
10857 /*
10858 * Guest Non-Register State.
10859 */
10860 /* Activity State. */
10861 uint32_t u32ActivityState;
10862 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
10863 AssertRCBreak(rc);
10864 HMVMX_CHECK_BREAK( !u32ActivityState
10865 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.Msrs.u64Misc)),
10866 VMX_IGS_ACTIVITY_STATE_INVALID);
10867 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
10868 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
10869 uint32_t u32IntrState;
10870 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
10871 AssertRCBreak(rc);
10872 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
10873 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10874 {
10875 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
10876 }
10877
10878 /** @todo Activity state and injecting interrupts. Left as a todo since we
10879         *        currently don't use any activity state other than ACTIVE. */
10880
10881 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
10882 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
10883
10884 /* Guest interruptibility-state. */
10885 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
10886 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
10887 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
10888 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
10889 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10890 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
10891 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
10892 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
10893 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
10894 if (VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo))
10895 {
10896 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
10897 {
10898 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10899 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10900 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
10901 }
10902 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10903 {
10904 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
10905 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
10906 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
10907 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
10908 }
10909 }
10910 /** @todo Assumes the processor is not in SMM. */
10911 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
10912 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
10913 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
10914 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
10915 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
10916 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
10917 && VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(u32EntryInfo)
10918 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
10919 {
10920 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
10921 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
10922 }
10923
10924 /* Pending debug exceptions. */
10925#if HC_ARCH_BITS == 64
10926 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
10927 AssertRCBreak(rc);
10928 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
10929 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
10930 u32Val = u64Val; /* For pending debug exceptions checks below. */
10931#else
10932 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
10933 AssertRCBreak(rc);
10934 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
10935 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
10936#endif
10937
10938 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
10939 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
10940 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
10941 {
10942 if ( (u32Eflags & X86_EFL_TF)
10943 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10944 {
10945 /* Bit 14 is PendingDebug.BS. */
10946 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
10947 }
10948 if ( !(u32Eflags & X86_EFL_TF)
10949 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10950 {
10951 /* Bit 14 is PendingDebug.BS. */
10952 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
10953 }
10954 }
10955
10956 /* VMCS link pointer. */
10957 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
10958 AssertRCBreak(rc);
10959 if (u64Val != UINT64_C(0xffffffffffffffff))
10960 {
10961 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
10962 /** @todo Bits beyond the processor's physical-address width MBZ. */
10963        /** @todo The 32-bit value located in memory referenced by the value of this field (as a
10964         *        physical address) must contain the processor's VMCS revision ID. */
10965 /** @todo SMM checks. */
10966 }
10967
10968 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
10969 * not using Nested Paging? */
10970 if ( pVM->hm.s.fNestedPaging
10971 && !fLongModeGuest
10972 && CPUMIsGuestInPAEModeEx(pCtx))
10973 {
10974 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
10975 AssertRCBreak(rc);
10976 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10977
10978 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
10979 AssertRCBreak(rc);
10980 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10981
10982 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
10983 AssertRCBreak(rc);
10984 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10985
10986 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
10987 AssertRCBreak(rc);
10988 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10989 }
10990
10991 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
10992 if (uError == VMX_IGS_ERROR)
10993 uError = VMX_IGS_REASON_NOT_FOUND;
10994 } while (0);
10995
10996 pVCpu->hm.s.u32HMError = uError;
10997 return uError;
10998
10999#undef HMVMX_ERROR_BREAK
11000#undef HMVMX_CHECK_BREAK
11001}
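/* Note: the VMX_IGS_* code returned here is also stored in pVCpu->hm.s.u32HMError above,
   presumably so the invalid-guest-state VM-exit path (hmR0VmxExitErrInvalidGuestState)
   and ring-3 can report exactly which guest-state check VT-x objected to. */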
11002
11003/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11004/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
11005/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
11006
11007/** @name VM-exit handlers.
11008 * @{
11009 */
11010
11011/**
11012 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
11013 */
11014HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11015{
11016 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11017 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
11018 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
11019 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
11020 return VINF_SUCCESS;
11021 return VINF_EM_RAW_INTERRUPT;
11022}
11023
11024
11025/**
11026 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
11027 */
11028HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11029{
11030 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11031 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
11032
11033 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11034 AssertRCReturn(rc, rc);
11035
11036 uint32_t uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntInfo);
11037 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
11038 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
11039 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
11040
11041 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
11042 {
11043 /*
11044 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
11045 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
11046 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
11047 *
11048 * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
11049 * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State".
11050 */
11051 VMXDispatchHostNmi();
11052 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
11053 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11054 return VINF_SUCCESS;
11055 }
11056
11057 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
11058 VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
11059 if (RT_UNLIKELY(rcStrictRc1 == VINF_SUCCESS))
11060 { /* likely */ }
11061 else
11062 {
11063 if (rcStrictRc1 == VINF_HM_DOUBLE_FAULT)
11064 rcStrictRc1 = VINF_SUCCESS;
11065 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11066 return rcStrictRc1;
11067 }
11068
11069 uint32_t uExitIntInfo = pVmxTransient->uExitIntInfo;
11070 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntInfo);
11071 switch (uIntType)
11072 {
11073 case VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
11074 Assert(uVector == X86_XCPT_DB);
11075 RT_FALL_THRU();
11076 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
11077 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT);
11078 RT_FALL_THRU();
11079 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
11080 {
11081 /*
11082 * If there's any exception caused as a result of event injection, the resulting
11083     * secondary/final exception will be pending and we shall continue guest execution
11084 * after injecting the event. The page-fault case is complicated and we manually
11085 * handle any currently pending event in hmR0VmxExitXcptPF.
11086 */
11087 if (!pVCpu->hm.s.Event.fPending)
11088 { /* likely */ }
11089 else if (uVector != X86_XCPT_PF)
11090 {
11091 rc = VINF_SUCCESS;
11092 break;
11093 }
11094
11095 switch (uVector)
11096 {
11097 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
11098 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
11099 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
11100 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
11101 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
11102 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pMixedCtx, pVmxTransient); break;
11103
11104 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
11105 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11106 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
11107 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11108 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
11109 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11110 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
11111 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11112 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
11113 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11114 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
11115 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11116 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
11117 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
11118 default:
11119 {
11120 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
11121 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
11122 {
11123 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
11124 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
11125 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
11126
11127 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
11128 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11129 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11130 AssertRCReturn(rc, rc);
11131 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
11132 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
11133 0 /* GCPtrFaultAddress */);
11134 }
11135 else
11136 {
11137 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
11138 pVCpu->hm.s.u32HMError = uVector;
11139 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
11140 }
11141 break;
11142 }
11143 }
11144 break;
11145 }
11146
11147 default:
11148 {
11149 pVCpu->hm.s.u32HMError = uExitIntInfo;
11150 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
11151 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo)));
11152 break;
11153 }
11154 }
11155 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
11156 return rc;
11157}
11158
11159
11160/**
11161 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
11162 */
11163HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11164{
11165 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11166
11167     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
11168 hmR0VmxClearIntWindowExitVmcs(pVCpu);
11169
11170 /* Deliver the pending interrupts via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
11171 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
11172 return VINF_SUCCESS;
11173}
11174
11175
11176/**
11177 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
11178 */
11179HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11180{
11181 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11182 if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT)))
11183 {
11184 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
11185 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11186 }
11187
11188 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
11189
11190 /*
11191 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
11192 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
11193 */
11194 uint32_t fIntrState = 0;
11195 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState);
11196 AssertRCReturn(rc, rc);
11197
11198 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
11199 if ( fBlockSti
11200 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
11201 {
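              /* VMCPU_FF_INHIBIT_INTERRUPTS mirrors the guest's STI/MOV SS interrupt shadow; drop it
                 here so event evaluation stays consistent with the interruptibility-state read above. */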
11202 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
11203 }
11204
11205     /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
11206 hmR0VmxClearNmiWindowExitVmcs(pVCpu);
11207
11208 /* Deliver the pending NMI via hmR0VmxEvaluatePendingEvent() and resume guest execution. */
11209 return VINF_SUCCESS;
11210}
11211
11212
11213/**
11214 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
11215 */
11216HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11217{
11218 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11219 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11220}
11221
11222
11223/**
11224 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
11225 */
11226HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11227{
11228 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11229 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11230}
11231
11232
11233/**
11234 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
11235 */
11236HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11237{
11238 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11239 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
11240
11241 /*
11242 * Get the state we need and update the exit history entry.
11243 */
11244 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11245 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11246 AssertRCReturn(rc, rc);
11247
11248 VBOXSTRICTRC rcStrict;
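      /* The exit history is keyed on the flat guest PC (CS base + RIP); a non-NULL record means this
         CPUID site exits frequently or needs probing and is worth running through EMHistoryExec below. */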
11249 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
11250 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
11251 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
11252 if (!pExitRec)
11253 {
11254 /*
11255 * Regular CPUID instruction execution.
11256 */
11257 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbInstr);
11258 if (rcStrict == VINF_SUCCESS)
11259 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RAX
11260 | HM_CHANGED_GUEST_RCX | HM_CHANGED_GUEST_RDX | HM_CHANGED_GUEST_RBX);
11261 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11262 {
11263 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11264 rcStrict = VINF_SUCCESS;
11265 }
11266 }
11267 else
11268 {
11269 /*
11270 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
11271 */
11272 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
11273 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
11274 AssertRCReturn(rc2, rc2);
11275
11276 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
11277 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
11278
11279 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
11280 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
11281
11282 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
11283 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
11284 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
11285 }
11286 return rcStrict;
11287}
11288
11289
11290/**
11291 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
11292 */
11293HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11294{
11295 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11296 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4);
11297 AssertRCReturn(rc, rc);
11298
11299 if (pMixedCtx->cr4 & X86_CR4_SMXE)
11300 return VINF_EM_RAW_EMULATE_INSTR;
11301
11302 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
11303 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11304}
11305
11306
11307/**
11308 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
11309 */
11310HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11311{
11312 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11313 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
11314 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11315 AssertRCReturn(rc, rc);
11316
11317 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr);
11318 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11319 {
11320 /* If we get a spurious VM-exit when offsetting is enabled,
11321 we must reset offsetting on VM-reentry. See @bugref{6634}. */
11322 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
11323 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11324 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11325 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
11326 }
11327 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11328 {
11329 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11330 rcStrict = VINF_SUCCESS;
11331 }
11332 return rcStrict;
11333}
11334
11335
11336/**
11337 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
11338 */
11339HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11340{
11341 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11342 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
11343 | CPUMCTX_EXTRN_TSC_AUX);
11344 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11345 AssertRCReturn(rc, rc);
11346
11347 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr);
11348 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
11349 {
11350 /* If we get a spurious VM-exit when offsetting is enabled,
11351 we must reset offsetting on VM-reentry. See @bugref{6634}. */
11352 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
11353 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11354 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11355 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX | HM_CHANGED_GUEST_RCX);
11356 }
11357 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11358 {
11359 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11360 rcStrict = VINF_SUCCESS;
11361 }
11362 return rcStrict;
11363}
11364
11365
11366/**
11367 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
11368 */
11369HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11370{
11371 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11372 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4
11373 | CPUMCTX_EXTRN_CR0
11374 | CPUMCTX_EXTRN_RFLAGS
11375 | CPUMCTX_EXTRN_SS);
11376 AssertRCReturn(rc, rc);
11377
11378 PVM pVM = pVCpu->CTX_SUFF(pVM);
11379 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11380 if (RT_LIKELY(rc == VINF_SUCCESS))
11381 {
11382 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11383 Assert(pVmxTransient->cbInstr == 2);
11384 }
11385 else
11386 {
11387 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
11388 rc = VERR_EM_INTERPRETER;
11389 }
11390 return rc;
11391}
11392
11393
11394/**
11395 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
11396 */
11397HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11398{
11399 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11400
11401 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
11402 if (EMAreHypercallInstructionsEnabled(pVCpu))
11403 {
11404 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP
11405 | CPUMCTX_EXTRN_RFLAGS
11406 | CPUMCTX_EXTRN_CR0
11407 | CPUMCTX_EXTRN_SS
11408 | CPUMCTX_EXTRN_CS
11409 | CPUMCTX_EXTRN_EFER);
11410 AssertRCReturn(rc, rc);
11411
11412 /* Perform the hypercall. */
11413 rcStrict = GIMHypercall(pVCpu, pMixedCtx);
11414 if (rcStrict == VINF_SUCCESS)
11415 {
11416 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11417 AssertRCReturn(rc, rc);
11418 }
11419 else
11420 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
11421 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
11422 || RT_FAILURE(rcStrict));
11423
11424         /* If the hypercall changes anything other than the guest's general-purpose registers,
11425            we would need to reload the changed guest bits here before VM-entry. */
11426 }
11427 else
11428 Log4Func(("Hypercalls not enabled\n"));
11429
11430 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
11431 if (RT_FAILURE(rcStrict))
11432 {
11433 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
11434 rcStrict = VINF_SUCCESS;
11435 }
11436
11437 return rcStrict;
11438}
11439
11440
11441/**
11442 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
11443 */
11444HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11445{
11446 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11447 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
11448
11449 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
11450 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11451 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11452 AssertRCReturn(rc, rc);
11453
11454 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQualification);
11455
11456 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
11457 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11458 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11459 {
11460 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11461 rcStrict = VINF_SUCCESS;
11462 }
11463 else
11464         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n",
11465 pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict)));
11466 return rcStrict;
11467}
11468
11469
11470/**
11471 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
11472 */
11473HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11474{
11475 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11476 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0
11477 | CPUMCTX_EXTRN_RFLAGS
11478 | CPUMCTX_EXTRN_SS);
11479 AssertRCReturn(rc, rc);
11480
11481 PVM pVM = pVCpu->CTX_SUFF(pVM);
11482 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11483 if (RT_LIKELY(rc == VINF_SUCCESS))
11484 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11485 else
11486 {
11487 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
11488 rc = VERR_EM_INTERPRETER;
11489 }
11490 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
11491 return rc;
11492}
11493
11494
11495/**
11496 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
11497 */
11498HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11499{
11500 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11501 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0
11502 | CPUMCTX_EXTRN_RFLAGS
11503 | CPUMCTX_EXTRN_SS);
11504 AssertRCReturn(rc, rc);
11505
11506 PVM pVM = pVCpu->CTX_SUFF(pVM);
11507 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
11508 rc = VBOXSTRICTRC_VAL(rc2);
11509 if (RT_LIKELY( rc == VINF_SUCCESS
11510 || rc == VINF_EM_HALT))
11511 {
11512 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11513 AssertRCReturn(rc3, rc3);
11514
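              /* If MWAIT would halt but EM determines that a wake-up condition is already pending,
                 keep executing the guest rather than returning VINF_EM_HALT to ring-3. */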
11515 if ( rc == VINF_EM_HALT
11516 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx))
11517 rc = VINF_SUCCESS;
11518 }
11519 else
11520 {
11521 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
11522 rc = VERR_EM_INTERPRETER;
11523 }
11524 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
11525 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
11526 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
11527 return rc;
11528}
11529
11530
11531/**
11532 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
11533 */
11534HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11535{
11536 /*
11537 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root
11538 * mode. In theory, we should never get this VM-exit. This can happen only if dual-monitor
11539 * treatment of SMI and VMX is enabled, which can (only?) be done by executing VMCALL in
11540 * VMX root operation. If we get here, something funny is going on.
11541 *
11542 * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment".
11543 */
11544 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11545 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11546 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11547}
11548
11549
11550/**
11551 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
11552 */
11553HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11554{
11555 /*
11556 * This can only happen if we support dual-monitor treatment of SMI, which can be activated
11557 * by executing VMCALL in VMX root operation. Only an STM (SMM transfer monitor) would get
11558 * this VM-exit when we (the executive monitor) execute a VMCALL in VMX root mode or receive
11559 * an SMI. If we get here, something funny is going on.
11560 *
11561 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
11562 * See Intel spec. 25.3 "Other Causes of VM-Exits"
11563 */
11564 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11565 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11566 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11567}
11568
11569
11570/**
11571 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
11572 */
11573HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11574{
11575 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
11576 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11577 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11578 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11579}
11580
11581
11582/**
11583 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
11584 */
11585HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11586{
11587 /*
11588 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used.
11589 * We don't make use of it as our guests don't have direct access to the host LAPIC.
11590 * See Intel spec. 25.3 "Other Causes of VM-exits".
11591 */
11592 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11593 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11594 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11595}
11596
11597
11598/**
11599 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
11600 * VM-exit.
11601 */
11602HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11603{
11604 /*
11605 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
11606      * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
11607      *
11608      * They are -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
11609      * See Intel spec. 23.8 "Restrictions on VMX Operation".
11610 */
11611 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11612 return VINF_SUCCESS;
11613}
11614
11615
11616/**
11617 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
11618 * VM-exit.
11619 */
11620HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11621{
11622 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11623 return VINF_EM_RESET;
11624}
11625
11626
11627/**
11628 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
11629 */
11630HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11631{
11632 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11633 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
11634
11635 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
11636 AssertRCReturn(rc, rc);
11637
11638 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
11639 rc = VINF_SUCCESS;
11640 else
11641 rc = VINF_EM_HALT;
11642
11643 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
11644 if (rc != VINF_SUCCESS)
11645 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
11646 return rc;
11647}
11648
11649
11650/**
11651 * VM-exit handler for instructions that result in a \#UD exception delivered to
11652 * the guest.
11653 */
11654HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11655{
11656 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11657 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
11658 return VINF_SUCCESS;
11659}
11660
11661
11662/**
11663 * VM-exit handler for expiry of the VMX preemption timer.
11664 */
11665HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11666{
11667 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11668
11669 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
11670 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11671
11672 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
11673 PVM pVM = pVCpu->CTX_SUFF(pVM);
11674 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
11675 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
11676 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
11677}
11678
11679
11680/**
11681 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
11682 */
11683HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11684{
11685 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11686
11687 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11688 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
11689 | CPUMCTX_EXTRN_CR4);
11690 AssertRCReturn(rc, rc);
11691
11692 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
11693 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11694 : HM_CHANGED_XCPT_RAISED_MASK);
11695
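      /* Only swap XCR0 around VM-entry/exit when the guest has CR4.OSXSAVE set and its XCR0 actually
         differs from the host value; otherwise the load/save can be skipped. */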
11696 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0();
11697
11698 return rcStrict;
11699}
11700
11701
11702/**
11703 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
11704 */
11705HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11706{
11707 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11708 /** @todo Use VM-exit instruction information. */
11709 return VERR_EM_INTERPRETER;
11710}
11711
11712
11713/**
11714 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
11715 * Error VM-exit.
11716 */
11717HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11718{
11719 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
11720 AssertRCReturn(rc, rc);
11721 rc = hmR0VmxCheckVmcsCtls(pVCpu);
11722 if (RT_FAILURE(rc))
11723 return rc;
11724
11725 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pMixedCtx);
11726 NOREF(uInvalidReason);
11727
11728#ifdef VBOX_STRICT
11729 uint32_t fIntrState;
11730 RTHCUINTREG uHCReg;
11731 uint64_t u64Val;
11732 uint32_t u32Val;
11733
11734 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
11735 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
11736 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
11737 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState);
11738 AssertRCReturn(rc, rc);
11739
11740 Log4(("uInvalidReason %u\n", uInvalidReason));
11741 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
11742 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
11743 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
11744 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", fIntrState));
11745
11746 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
11747 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
11748 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
11749 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
11750 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
11751     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
11752 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
11753 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
11754 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
11755 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
11756 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
11757 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
11758
11759 hmR0DumpRegs(pVCpu, pMixedCtx);
11760#else
11761 NOREF(pVmxTransient);
11762#endif
11763
11764 return VERR_VMX_INVALID_GUEST_STATE;
11765}
11766
11767
11768/**
11769 * VM-exit handler for VM-entry failure due to an MSR-load
11770 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
11771 */
11772HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11773{
11774 NOREF(pVmxTransient);
11775 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
11776 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11777}
11778
11779
11780/**
11781 * VM-exit handler for VM-entry failure due to a machine-check event
11782 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
11783 */
11784HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11785{
11786 NOREF(pVmxTransient);
11787 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
11788 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11789}
11790
11791
11792/**
11793 * VM-exit handler for all undefined reasons. Should never ever happen... in
11794 * theory.
11795 */
11796HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11797{
11798 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
11799 NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
11800 return VERR_VMX_UNDEFINED_EXIT_CODE;
11801}
11802
11803
11804/**
11805 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
11806 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
11807 * Conditional VM-exit.
11808 */
11809HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11810{
11811 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11812
11813 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
11814 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
11815 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
11816 return VERR_EM_INTERPRETER;
11817 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11818 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11819}
11820
11821
11822/**
11823 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
11824 */
11825HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11826{
11827 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11828
11829 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
11830 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
11831 return VERR_EM_INTERPRETER;
11832 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
11833 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11834}
11835
11836
11837/**
11838 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
11839 */
11840HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11841{
11842 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11843
11844     /** @todo Optimize this: We currently drag in the whole MSR state
11845      * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
11846      * MSRs required. That would require changes to IEM and possibly CPUM too.
11847      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
11848 uint32_t const idMsr = pMixedCtx->ecx; NOREF(idMsr); /* Save it. */
11849 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11850 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
11851 AssertRCReturn(rc, rc);
11852
11853 Log4Func(("ecx=%#RX32\n", idMsr));
11854
11855#ifdef VBOX_STRICT
11856 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
11857 {
11858 if ( hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr)
11859 && idMsr != MSR_K6_EFER)
11860 {
11861 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
11862 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11863 }
11864 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
11865 {
11866 VMXMSREXITREAD enmRead;
11867 VMXMSREXITWRITE enmWrite;
11868 int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
11869 AssertRCReturn(rc2, rc2);
11870 if (enmRead == VMXMSREXIT_PASSTHRU_READ)
11871 {
11872 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
11873 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11874 }
11875 }
11876 }
11877#endif
11878
11879 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbInstr);
11880 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
11881 if (rcStrict == VINF_SUCCESS)
11882 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
11883 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
11884 else if (rcStrict == VINF_IEM_RAISED_XCPT)
11885 {
11886 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
11887 rcStrict = VINF_SUCCESS;
11888 }
11889 else
11890 AssertMsg(rcStrict == VINF_IEM_RAISED_XCPT, ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
11891
11892 return rcStrict;
11893}
11894
11895
11896/**
11897 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
11898 */
11899HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
11900{
11901 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
11902
11903     /** @todo Optimize this: We currently drag in the whole MSR state
11904      * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
11905      * MSRs required. That would require changes to IEM and possibly CPUM too.
11906      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
11907 uint32_t const idMsr = pMixedCtx->ecx; /* Save it. */
11908 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
11909 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
11910 AssertRCReturn(rc, rc);
11911
11912 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pMixedCtx->edx, pMixedCtx->eax));
11913
11914 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
11915 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
11916
11917 if (rcStrict == VINF_SUCCESS)
11918 {
11919 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
11920
11921 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
11922 if ( idMsr == MSR_IA32_APICBASE
11923 || ( idMsr >= MSR_IA32_X2APIC_START
11924 && idMsr <= MSR_IA32_X2APIC_END))
11925 {
11926 /*
11927 * We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
11928 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before IEM changes it.
11929 */
11930 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
11931 }
11932 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
11933 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
11934 else if (idMsr == MSR_K6_EFER)
11935 {
11936 /*
11937 * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
11938 * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
11939 * the other bits as well, SCE and NXE. See @bugref{7368}.
11940 */
11941 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR
11942 | HM_CHANGED_VMX_ENTRY_CTLS
11943 | HM_CHANGED_VMX_EXIT_CTLS);
11944 }
11945
11946 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
11947 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
11948 {
11949 switch (idMsr)
11950 {
11951 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
11952 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
11953 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
11954 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
11955 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
11956 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
11957 default:
11958 {
11959 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
11960 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
11961 else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
11962 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
11963 break;
11964 }
11965 }
11966 }
11967#ifdef VBOX_STRICT
11968 else
11969 {
11970 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
11971 switch (idMsr)
11972 {
11973 case MSR_IA32_SYSENTER_CS:
11974 case MSR_IA32_SYSENTER_EIP:
11975 case MSR_IA32_SYSENTER_ESP:
11976 case MSR_K8_FS_BASE:
11977 case MSR_K8_GS_BASE:
11978 {
11979 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
11980 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11981 }
11982
11983 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
11984 default:
11985 {
11986 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
11987 {
11988 /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */
11989 if (idMsr != MSR_K6_EFER)
11990 {
11991 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
11992 idMsr));
11993 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
11994 }
11995 }
11996
11997 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
11998 {
11999 VMXMSREXITREAD enmRead;
12000 VMXMSREXITWRITE enmWrite;
12001 int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
12002 AssertRCReturn(rc2, rc2);
12003 if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
12004 {
12005 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
12006 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12007 }
12008 }
12009 break;
12010 }
12011 }
12012 }
12013#endif /* VBOX_STRICT */
12014 }
12015 else if (rcStrict == VINF_IEM_RAISED_XCPT)
12016 {
12017 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
12018 rcStrict = VINF_SUCCESS;
12019 }
12020 else
12021 AssertMsg(rcStrict == VINF_IEM_RAISED_XCPT, ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12022
12023 return rcStrict;
12024}
12025
12026
12027/**
12028 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
12029 */
12030HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12031{
12032 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12033 /** @todo The guest has likely hit a contended spinlock. We might want to
12034      *        poke or schedule a different guest VCPU. */
12035 return VINF_EM_RAW_INTERRUPT;
12036}
12037
12038
12039/**
12040 * VM-exit handler for when the TPR value is lowered below the specified
12041 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
12042 */
12043HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12044{
12045 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12046 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
12047
12048 /*
12049 * The TPR shadow would've been synced with the APIC TPR in hmR0VmxPostRunGuest(). We'll re-evaluate
12050 * pending interrupts and inject them before the next VM-entry so we can just continue execution here.
12051 */
12052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
12053 return VINF_SUCCESS;
12054}
12055
12056
12057/**
12058 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
12059 * VM-exit.
12060 *
12061 * @retval VINF_SUCCESS when guest execution can continue.
12062 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
12063 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
12064 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
12065 * interpreter.
12066 */
12067HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12068{
12069 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12070 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
12071
12072 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12073 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12074 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12075 AssertRCReturn(rc, rc);
12076
12077 VBOXSTRICTRC rcStrict;
12078 PVM pVM = pVCpu->CTX_SUFF(pVM);
12079 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification;
12080 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification);
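      /* The exit qualification encodes the access type (MOV to/from CRx, CLTS or LMSW), the control
         register involved and, for MOV accesses, the general-purpose register used. See Intel spec.
         "Exit Qualification for Control-Register Accesses". */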
12081 switch (uAccessType)
12082 {
12083 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: /* MOV to CRx */
12084 {
12085 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr,
12086 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
12087 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification));
12088 AssertMsg( rcStrict == VINF_SUCCESS
12089 || rcStrict == VINF_IEM_RAISED_XCPT
12090 || rcStrict == VINF_PGM_CHANGE_MODE
12091 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12092
12093 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
12094 {
12095 case 0:
12096 {
12097 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12098 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12099 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
12100 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0));
12101 break;
12102 }
12103
12104 case 2:
12105 {
12106 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
12107                 /* Nothing to do here; CR2 is not part of the VMCS. */
12108 break;
12109 }
12110
12111 case 3:
12112 {
12113 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop);
12114 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
12115 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12116 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
12117 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3));
12118 break;
12119 }
12120
12121 case 4:
12122 {
12123 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
12124 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12125 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
12126 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
12127 pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
12128 break;
12129 }
12130
12131 case 8:
12132 {
12133 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
12134 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
12135 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
12136 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
12137 break;
12138 }
12139 default:
12140 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)));
12141 break;
12142 }
12143 break;
12144 }
12145
12146 case VMX_EXIT_QUAL_CRX_ACCESS_READ: /* MOV from CRx */
12147 {
12148 Assert( !pVM->hm.s.fNestedPaging
12149 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
12150 || pVCpu->hm.s.fUsingDebugLoop
12151 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3);
12152 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
12153 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8
12154 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
12155
12156 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr,
12157 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification),
12158 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification));
12159 AssertMsg( rcStrict == VINF_SUCCESS
12160 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12161#ifdef VBOX_WITH_STATISTICS
12162 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))
12163 {
12164 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
12165 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
12166 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
12167 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
12168 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
12169 }
12170#endif
12171 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification),
12172 VBOXSTRICTRC_VAL(rcStrict)));
12173 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP)
12174 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
12175 else
12176 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
12177 break;
12178 }
12179
12180 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
12181 {
12182 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
12183 AssertMsg( rcStrict == VINF_SUCCESS
12184 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12185
12186 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12187 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
12188 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12189 break;
12190 }
12191
12192 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
12193 {
12194 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr,
12195 VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification));
12196 AssertMsg( rcStrict == VINF_SUCCESS
12197 || rcStrict == VINF_IEM_RAISED_XCPT
12198 || rcStrict == VINF_PGM_CHANGE_MODE,
12199 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12200
12201 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
12202 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
12203 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
12204 break;
12205 }
12206
12207 default:
12208 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
12209 VERR_VMX_UNEXPECTED_EXCEPTION);
12210 }
12211
12212 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
12213 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
12214 if (rcStrict == VINF_IEM_RAISED_XCPT)
12215 {
12216 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK);
12217 rcStrict = VINF_SUCCESS;
12218 }
12219
12220 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
12221 NOREF(pVM);
12222 return rcStrict;
12223}
12224
12225
12226/**
12227 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
12228 * VM-exit.
12229 */
12230HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12231{
12232 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12233 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
12234 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
12235
12236 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12237 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12238 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK
12239 | CPUMCTX_EXTRN_SREG_MASK
12240 | CPUMCTX_EXTRN_EFER);
12241     /* EFER is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
12242 AssertRCReturn(rc, rc);
12243
12244     /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
12245 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification);
12246 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification);
12247 bool fIOWrite = ( VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification)
12248 == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
12249 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification);
12250 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
12251 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
12252 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
12253
12254 /*
12255 * Update exit history to see if this exit can be optimized.
12256 */
12257 VBOXSTRICTRC rcStrict;
12258 PCEMEXITREC pExitRec = NULL;
12259 if ( !fGstStepping
12260 && !fDbgStepping)
12261 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12262 !fIOString
12263 ? !fIOWrite
12264 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
12265 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
12266 : !fIOWrite
12267 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
12268 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
12269 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
12270 if (!pExitRec)
12271 {
12272 /* I/O operation lookup arrays. */
12273 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
12274 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
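              /* Both tables are indexed by the exit-qualification width field: 0=byte, 1=word, 3=dword;
                 index 2 is invalid and was rejected by the AssertReturn on uIOWidth above. */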
12275 uint32_t const cbValue = s_aIOSizes[uIOWidth];
12276 uint32_t const cbInstr = pVmxTransient->cbInstr;
12277 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
12278 PVM pVM = pVCpu->CTX_SUFF(pVM);
12279 if (fIOString)
12280 {
12281 /*
12282 * INS/OUTS - I/O String instruction.
12283 *
12284 * Use instruction-information if available, otherwise fall back on
12285 * interpreting the instruction.
12286 */
12287 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
12288 fIOWrite ? 'w' : 'r'));
12289 AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
12290 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
12291 {
12292 int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
12293 AssertRCReturn(rc2, rc2);
12294 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
12295 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
12296 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
12297 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification);
12298 if (fIOWrite)
12299 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
12300 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
12301 else
12302 {
12303 /*
12304 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
12305                  * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
12306 * See Intel Instruction spec. for "INS".
12307 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
12308 */
12309 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
12310 }
12311 }
12312 else
12313 rcStrict = IEMExecOne(pVCpu);
12314
12315 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12316 fUpdateRipAlready = true;
12317 }
12318 else
12319 {
12320 /*
12321 * IN/OUT - I/O instruction.
12322 */
12323 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
12324 fIOWrite ? 'w' : 'r'));
12325 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
12326 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification));
12327 if (fIOWrite)
12328 {
12329 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
12330 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
12331 }
12332 else
12333 {
12334 uint32_t u32Result = 0;
12335 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
12336 if (IOM_SUCCESS(rcStrict))
12337 {
12338 /* Save result of I/O IN instr. in AL/AX/EAX. */
12339 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
12340 }
12341 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
12342 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
12343 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
12344 }
12345 }
12346
12347 if (IOM_SUCCESS(rcStrict))
12348 {
12349 if (!fUpdateRipAlready)
12350 {
12351 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, cbInstr);
12352 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12353 }
12354
12355 /*
12356              * INS/OUTS with a REP prefix updates RFLAGS; this was observed as a triple-fault guru
12357              * meditation while booting a Fedora 17 64-bit guest.
12358 *
12359 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
12360 */
12361 if (fIOString)
12362 {
12363 /** @todo Single-step for INS/OUTS with REP prefix? */
12364 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
12365 }
12366 else if ( !fDbgStepping
12367 && fGstStepping)
12368 {
12369 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
12370 AssertRCReturn(rc, rc);
12371 }
12372
12373 /*
12374 * If any I/O breakpoints are armed, we need to check if one triggered
12375 * and take appropriate action.
12376 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
12377 */
12378 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7);
12379 AssertRCReturn(rc, rc);
12380
12381 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
12382 * execution engines about whether hyper BPs and such are pending. */
12383 uint32_t const uDr7 = pMixedCtx->dr[7];
12384 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
12385 && X86_DR7_ANY_RW_IO(uDr7)
12386 && (pMixedCtx->cr4 & X86_CR4_DE))
12387 || DBGFBpIsHwIoArmed(pVM)))
12388 {
12389 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
12390
12391 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
12392 VMMRZCallRing3Disable(pVCpu);
12393 HM_DISABLE_PREEMPT();
12394
12395 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
12396
12397 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
12398 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
12399 {
12400 /* Raise #DB. */
12401 if (fIsGuestDbgActive)
12402 ASMSetDR6(pMixedCtx->dr[6]);
12403 if (pMixedCtx->dr[7] != uDr7)
12404 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
12405
12406 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
12407 }
12408             /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
12409                however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
12410 else if ( rcStrict2 != VINF_SUCCESS
12411 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
12412 rcStrict = rcStrict2;
12413 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
12414
12415 HM_RESTORE_PREEMPT();
12416 VMMRZCallRing3Enable(pVCpu);
12417 }
12418 }
12419
12420#ifdef VBOX_STRICT
12421 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
12422 Assert(!fIOWrite);
12423 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE)
12424 Assert(fIOWrite);
12425 else
12426 {
12427# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
12428 * statuses, that the VMM device and some others may return. See
12429 * IOM_SUCCESS() for guidance. */
12430 AssertMsg( RT_FAILURE(rcStrict)
12431 || rcStrict == VINF_SUCCESS
12432 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
12433 || rcStrict == VINF_EM_DBG_BREAKPOINT
12434 || rcStrict == VINF_EM_RAW_GUEST_TRAP
12435 || rcStrict == VINF_EM_RAW_TO_R3
12436 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
12437# endif
12438 }
12439#endif
12440 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
12441 }
12442 else
12443 {
12444 /*
12445 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
12446 */
12447 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
12448 AssertRCReturn(rc2, rc2);
12449 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
12450 : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
12451 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
12452 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12453 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
12454 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
12455
12456 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12457 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
12458
12459 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12460 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12461 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12462 }
12463 return rcStrict;
12464}
12465
12466
12467/**
12468 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
12469 * VM-exit.
12470 */
12471HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12472{
12473 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12474
12475     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
12476 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12477 AssertRCReturn(rc, rc);
12478 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
12479 {
12480 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
12481 AssertRCReturn(rc, rc);
12482 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
12483 {
12484 uint32_t uErrCode;
12485 RTGCUINTPTR GCPtrFaultAddress;
12486 uint32_t const uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
12487 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
12488 bool const fErrorCodeValid = VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
12489 if (fErrorCodeValid)
12490 {
12491 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
12492 AssertRCReturn(rc, rc);
12493 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
12494 }
12495 else
12496 uErrCode = 0;
12497
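                  /* Only a hardware #PF carries a fault address (CR2) that must be preserved when
                     re-injecting the event; all other vectored events pass 0. */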
12498 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
12499 && uVector == X86_XCPT_PF)
12500 GCPtrFaultAddress = pMixedCtx->cr2;
12501 else
12502 GCPtrFaultAddress = 0;
12503
12504 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
12505 0 /* cbInstr */, uErrCode, GCPtrFaultAddress);
12506
12507 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
12508 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12509 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12510 }
12511 }
12512
12513 /* Fall back to the interpreter to emulate the task-switch. */
12514 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
12515 return VERR_EM_INTERPRETER;
12516}
12517
12518
12519/**
12520 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
12521 */
12522HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12523{
12524 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
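      /* Monitor-trap-flag exits are used for single-stepping the guest; disarm the control here and
         report the completed step to ring-3 (VINF_EM_DBG_STEPPED). */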
12525 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
12526 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
12527 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
12528 AssertRCReturn(rc, rc);
12529 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
12530 return VINF_EM_DBG_STEPPED;
12531}
12532
12533
12534/**
12535 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
12536 */
12537HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12538{
12539 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12540
12541 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
12542
12543 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12544 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12545 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12546 {
12547 /* If, for some crazy guest, event delivery causes an APIC-access VM-exit, go to instruction emulation. */
12548 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12549 {
12550 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
12551 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12552 }
12553 }
12554 else
12555 {
12556 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12557 rcStrict1 = VINF_SUCCESS;
12558 return rcStrict1;
12559 }
12560
12561 /* IOMMMIOPhysHandler() below may call into IEM, save the necessary state. */
12562 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12563 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12564 AssertRCReturn(rc, rc);
12565
12566 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
12567 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
12568 VBOXSTRICTRC rcStrict2;
12569 switch (uAccessType)
12570 {
12571 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
12572 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
12573 {
12574 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
12575 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,
12576 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
12577
12578 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase; /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */
12579 GCPhys &= PAGE_BASE_GC_MASK;
12580 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
12581 PVM pVM = pVCpu->CTX_SUFF(pVM);
12582 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
12583 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
12584
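 /* Hand the access to IOM as an MMIO access on the APIC-access page; writes are flagged with
    X86_TRAP_PF_RW so they are treated like the corresponding write page fault. */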
12585 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
12586 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
12587 CPUMCTX2CORE(pMixedCtx), GCPhys);
12588 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
12589 if ( rcStrict2 == VINF_SUCCESS
12590 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12591 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12592 {
12593 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP
12594 | HM_CHANGED_GUEST_RSP
12595 | HM_CHANGED_GUEST_RFLAGS
12596 | HM_CHANGED_GUEST_APIC_TPR);
12597 rcStrict2 = VINF_SUCCESS;
12598 }
12599 break;
12600 }
12601
12602 default:
12603 Log4Func(("uAccessType=%#x\n", uAccessType));
12604 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR;
12605 break;
12606 }
12607
12608 if (rcStrict2 != VINF_SUCCESS)
12609 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
12610 return rcStrict2;
12611}
12612
12613
12614/**
12615 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
12616 * VM-exit.
12617 */
12618HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12619{
12620 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12621
12622 /* We should -not- get this VM-exit if the guest's debug registers were active. */
12623 if (pVmxTransient->fWasGuestDebugStateActive)
12624 {
12625 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
12626 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12627 }
12628
12629 if ( !pVCpu->hm.s.fSingleInstruction
12630 && !pVmxTransient->fWasHyperDebugStateActive)
12631 {
12632 Assert(!DBGFIsStepping(pVCpu));
12633 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB));
12634
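 /* Switch to lazy MOV DRx handling: load the guest debug state onto the CPU, stop intercepting
    MOV DRx and restart the instruction, so further DRx accesses run without VM-exits. */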
12635 /* Don't intercept MOV DRx any more. */
12636 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
12637 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
12638 AssertRCReturn(rc, rc);
12639
12640 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
12641 VMMRZCallRing3Disable(pVCpu);
12642 HM_DISABLE_PREEMPT();
12643
12644 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
12645 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
12646 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
12647
12648 HM_RESTORE_PREEMPT();
12649 VMMRZCallRing3Enable(pVCpu);
12650
12651#ifdef VBOX_WITH_STATISTICS
12652 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12653 AssertRCReturn(rc, rc);
12654 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12655 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12656 else
12657 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12658#endif
12659 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
12660 return VINF_SUCCESS;
12661 }
12662
12663 /*
12664 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date.
12665 * Update the segment registers and DR7 from the CPU.
12666 */
12667 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12668 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK
12669 | CPUMCTX_EXTRN_DR7);
12670 AssertRCReturn(rc, rc);
12671 Log4Func(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
12672
12673 PVM pVM = pVCpu->CTX_SUFF(pVM);
12674 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
12675 {
12676 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
12677 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification),
12678 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification));
12679 if (RT_SUCCESS(rc))
12680 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
12681 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
12682 }
12683 else
12684 {
12685 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
12686 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification),
12687 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification));
12688 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
12689 }
12690
12691 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
12692 if (RT_SUCCESS(rc))
12693 {
12694 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
12695 AssertRCReturn(rc2, rc2);
12696 return VINF_SUCCESS;
12697 }
12698 return rc;
12699}
12700
12701
12702/**
12703 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
12704 * Conditional VM-exit.
12705 */
12706HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12707{
12708 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12709 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
12710
12711 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12712 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12713 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12714 {
12715 /* If event delivery causes an EPT misconfig (MMIO), go back to instruction emulation as otherwise
12716 injecting the original pending event would most likely cause the same EPT misconfig VM-exit. */
12717 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12718 {
12719 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
12720 return VINF_EM_RAW_INJECT_TRPM_EVENT;
12721 }
12722 }
12723 else
12724 {
12725 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12726 rcStrict1 = VINF_SUCCESS;
12727 return rcStrict1;
12728 }
12729
12730 /*
12731 * Get sufficient state and update the exit history entry.
12732 */
12733 RTGCPHYS GCPhys;
12734 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
12735 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12736 AssertRCReturn(rc, rc);
12737
12738 VBOXSTRICTRC rcStrict;
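 /* Record this exit in the exit history; a non-NULL exit record means this RIP exits frequently (or needs
    probing) and is better handled by executing a batch of instructions via EMHistoryExec below. */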
12739 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
12740 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
12741 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
12742 if (!pExitRec)
12743 {
12744 /*
12745 * If we succeed, resume guest execution.
12746 * If we fail to interpret the instruction because we couldn't get the guest physical address
12747 * of the page containing the instruction via the guest's page tables (we would invalidate the
12748 * guest page in the host TLB), resume execution anyway; the resulting guest page fault lets the
12749 * guest handle this weird case. See @bugref{6043}.
12750 */
12751 PVM pVM = pVCpu->CTX_SUFF(pVM);
12752 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
12753 Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pMixedCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
12754 if ( rcStrict == VINF_SUCCESS
12755 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
12756 || rcStrict == VERR_PAGE_NOT_PRESENT)
12757 {
12758 /* Successfully handled MMIO operation. */
12759 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP
12760 | HM_CHANGED_GUEST_RSP
12761 | HM_CHANGED_GUEST_RFLAGS
12762 | HM_CHANGED_GUEST_APIC_TPR);
12763 rcStrict = VINF_SUCCESS;
12764 }
12765 }
12766 else
12767 {
12768 /*
12769 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
12770 */
12771 Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
12772 int rc2 = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12773 AssertRCReturn(rc2, rc2);
12774
12775 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
12776 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
12777
12778 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
12779 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
12780
12781 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
12782 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
12783 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
12784 }
12785 return VBOXSTRICTRC_TODO(rcStrict);
12786}
12787
12788
12789/**
12790 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
12791 * VM-exit.
12792 */
12793HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12794{
12795 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
12796 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
12797
12798 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12799 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
12800 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
12801 {
12802 /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. */
12803 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
12804 Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));
12805 }
12806 else
12807 {
12808 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
12809 rcStrict1 = VINF_SUCCESS;
12810 return rcStrict1;
12811 }
12812
12813 RTGCPHYS GCPhys;
12814 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
12815 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12816 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
12817 AssertRCReturn(rc, rc);
12818
12819 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
12820 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
12821
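 /* Build a page-fault style error code from the EPT violation qualification bits (instruction fetch,
    data write, entry present) so the nested-paging handler can treat it like a regular #PF. */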
12822 RTGCUINT uErrorCode = 0;
12823 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
12824 uErrorCode |= X86_TRAP_PF_ID;
12825 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE)
12826 uErrorCode |= X86_TRAP_PF_RW;
12827 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
12828 uErrorCode |= X86_TRAP_PF_P;
12829
12830 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
12831
12832 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,
12833 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
12834
12835 /* Handle the pagefault trap for the nested shadow table. */
12836 PVM pVM = pVCpu->CTX_SUFF(pVM);
12837 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
12838 TRPMResetTrap(pVCpu);
12839
12840 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
12841 if ( rcStrict2 == VINF_SUCCESS
12842 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
12843 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
12844 {
12845 /* Successfully synced our nested page tables. */
12846 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
12847 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP
12848 | HM_CHANGED_GUEST_RSP
12849 | HM_CHANGED_GUEST_RFLAGS);
12850 return VINF_SUCCESS;
12851 }
12852
12853 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
12854 return rcStrict2;
12855}
12856
12857/** @} */
12858
12859/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12860/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
12861/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12862
12863/** @name VM-exit exception handlers.
12864 * @{
12865 */
12866
12867/**
12868 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
12869 */
12870static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12871{
12872 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12873 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
12874
12875 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0);
12876 AssertRCReturn(rc, rc);
12877
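 /* With CR0.NE clear the guest expects legacy external FPU error reporting, i.e. FERR# asserting IRQ 13
    through the interrupt controller instead of a #MF exception. */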
12878 if (!(pMixedCtx->cr0 & X86_CR0_NE))
12879 {
12880 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
12881 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
12882
12883 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
12884 * provides the VM-exit instruction length. If this causes a problem later,
12885 * disassemble the instruction like it's done on AMD-V. */
12886 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
12887 AssertRCReturn(rc2, rc2);
12888 return rc;
12889 }
12890
12891 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12892 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12893 return rc;
12894}
12895
12896
12897/**
12898 * VM-exit exception handler for \#BP (Breakpoint exception).
12899 */
12900static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12901{
12902 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12903 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
12904
12905 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
12906 AssertRCReturn(rc, rc);
12907
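 /* Give DBGF first shot at the breakpoint; VINF_EM_RAW_GUEST_TRAP means it is not one of ours and the
    #BP must be re-injected into the guest below. */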
12908 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx));
12909 if (rc == VINF_EM_RAW_GUEST_TRAP)
12910 {
12911 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12912 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12913 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12914 AssertRCReturn(rc, rc);
12915
12916 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12917 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12918 }
12919
12920 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
12921 return rc;
12922}
12923
12924
12925/**
12926 * VM-exit exception handler for \#AC (alignment check exception).
12927 */
12928static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12929{
12930 RT_NOREF_PV(pMixedCtx);
12931 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12932
12933 /*
12934 * Re-inject it. We'll detect any nesting before getting here.
12935 */
12936 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12937 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12938 AssertRCReturn(rc, rc);
12939 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
12940
12941 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
12942 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
12943 return VINF_SUCCESS;
12944}
12945
12946
12947/**
12948 * VM-exit exception handler for \#DB (Debug exception).
12949 */
12950static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
12951{
12952 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
12953 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
12954
12955 /*
12956 * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
12957 * for processing.
12958 */
12959 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
12960
12961 /* See Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
12962 uint64_t uDR6 = X86_DR6_INIT_VAL;
12963 uDR6 |= ( pVmxTransient->uExitQualification
12964 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
12965
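 /* Let DBGF decide whether this #DB is a hypervisor debug event (stepping, breakpoints) or a guest trap
    (VINF_EM_RAW_GUEST_TRAP) that must be reflected back to the guest. */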
12966 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
12967 Log6Func(("rc=%Rrc\n", rc));
12968 if (rc == VINF_EM_RAW_GUEST_TRAP)
12969 {
12970 /*
12971 * The exception was for the guest. Update DR6, DR7.GD and
12972 * IA32_DEBUGCTL.LBR before forwarding it.
12973 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
12974 */
12975 VMMRZCallRing3Disable(pVCpu);
12976 HM_DISABLE_PREEMPT();
12977
12978 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
12979 pMixedCtx->dr[6] |= uDR6;
12980 if (CPUMIsGuestDebugStateActive(pVCpu))
12981 ASMSetDR6(pMixedCtx->dr[6]);
12982
12983 HM_RESTORE_PREEMPT();
12984 VMMRZCallRing3Enable(pVCpu);
12985
12986 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7);
12987 AssertRCReturn(rc, rc);
12988
12989 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
12990 pMixedCtx->dr[7] &= ~X86_DR7_GD;
12991
12992 /* Paranoia. */
12993 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
12994 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
12995
12996 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
12997 AssertRCReturn(rc, rc);
12998
12999 /*
13000 * Raise #DB in the guest.
13001 *
13002 * It is important to reflect exactly what the VM-exit gave us (preserving the
13003 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
13004 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
13005 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
13006 *
13007 * Intel re-documented ICEBP/INT1 in May 2018 (previously documented as part of the
13008 * Intel 386); see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
13009 */
13010 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13011 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13012 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13013 AssertRCReturn(rc, rc);
13014 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13015 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13016 return VINF_SUCCESS;
13017 }
13018
13019 /*
13020 * Not a guest trap, so this must be a hypervisor-related debug event.
13021 * Update DR6 in case someone is interested in it.
13022 */
13023 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
13024 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
13025 CPUMSetHyperDR6(pVCpu, uDR6);
13026
13027 return rc;
13028}
13029
13030/**
13031 * VM-exit exception handler for \#GP (General-protection exception).
13032 *
13033 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
13034 */
13035static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13036{
13037 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13038 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
13039
13040 int rc;
13041 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
13042 { /* likely */ }
13043 else
13044 {
13045#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
13046 Assert(pVCpu->hm.s.fUsingDebugLoop);
13047#endif
13048 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
13049 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13050 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13051 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13052 rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13053 AssertRCReturn(rc, rc);
13054 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,
13055 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));
13056 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13057 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13058 return rc;
13059 }
13060
13061 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
13062 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
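 /*
  * Without unrestricted guest execution, guest real mode runs as a virtual-8086 task, so privilege and
  * IOPL-sensitive instructions (CLI, STI, HLT, PUSHF, POPF, IRET, INT n) raise #GP and are interpreted below.
  */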
13063
13064 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
13065 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13066 AssertRCReturn(rc, rc);
13067
13068 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
13069 uint32_t cbOp = 0;
13070 PVM pVM = pVCpu->CTX_SUFF(pVM);
13071 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
13072 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
13073 if (RT_SUCCESS(rc))
13074 {
13075 rc = VINF_SUCCESS;
13076 Assert(cbOp == pDis->cbInstr);
13077 Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
13078 switch (pDis->pCurInstr->uOpcode)
13079 {
13080 case OP_CLI:
13081 {
13082 pMixedCtx->eflags.Bits.u1IF = 0;
13083 pMixedCtx->eflags.Bits.u1RF = 0;
13084 pMixedCtx->rip += pDis->cbInstr;
13085 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13086 if ( !fDbgStepping
13087 && pMixedCtx->eflags.Bits.u1TF)
13088 {
13089 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13090 AssertRCReturn(rc, rc);
13091 }
13092 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
13093 break;
13094 }
13095
13096 case OP_STI:
13097 {
13098 bool fOldIF = pMixedCtx->eflags.Bits.u1IF;
13099 pMixedCtx->eflags.Bits.u1IF = 1;
13100 pMixedCtx->eflags.Bits.u1RF = 0;
13101 pMixedCtx->rip += pDis->cbInstr;
13102 if (!fOldIF)
13103 {
13104 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
13105 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13106 }
13107 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13108 if ( !fDbgStepping
13109 && pMixedCtx->eflags.Bits.u1TF)
13110 {
13111 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13112 AssertRCReturn(rc, rc);
13113 }
13114 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
13115 break;
13116 }
13117
13118 case OP_HLT:
13119 {
13120 rc = VINF_EM_HALT;
13121 pMixedCtx->rip += pDis->cbInstr;
13122 pMixedCtx->eflags.Bits.u1RF = 0;
13123 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13124 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
13125 break;
13126 }
13127
13128 case OP_POPF:
13129 {
13130 Log4Func(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
13131 uint32_t cbParm;
13132 uint32_t uMask;
13133 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
13134 if (pDis->fPrefix & DISPREFIX_OPSIZE)
13135 {
13136 cbParm = 4;
13137 uMask = 0xffffffff;
13138 }
13139 else
13140 {
13141 cbParm = 2;
13142 uMask = 0xffff;
13143 }
13144
13145 /* Get the stack pointer & pop the contents of the stack onto Eflags. */
13146 RTGCPTR GCPtrStack = 0;
13147 X86EFLAGS Eflags;
13148 Eflags.u32 = 0;
13149 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
13150 &GCPtrStack);
13151 if (RT_SUCCESS(rc))
13152 {
13153 Assert(sizeof(Eflags.u32) >= cbParm);
13154 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm, PGMACCESSORIGIN_HM));
13155 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
13156 }
13157 if (RT_FAILURE(rc))
13158 {
13159 rc = VERR_EM_INTERPRETER;
13160 break;
13161 }
13162 Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
13163 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF))
13164 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
13165 pMixedCtx->esp += cbParm;
13166 pMixedCtx->esp &= uMask;
13167 pMixedCtx->rip += pDis->cbInstr;
13168 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP
13169 | HM_CHANGED_GUEST_RSP
13170 | HM_CHANGED_GUEST_RFLAGS);
13171 /* Generate a pending-debug exception when the guest is stepping over POPF, regardless of how
13172 POPF restores EFLAGS.TF. */
13173 if ( !fDbgStepping
13174 && fGstStepping)
13175 {
13176 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13177 AssertRCReturn(rc, rc);
13178 }
13179 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
13180 break;
13181 }
13182
13183 case OP_PUSHF:
13184 {
13185 uint32_t cbParm;
13186 uint32_t uMask;
13187 if (pDis->fPrefix & DISPREFIX_OPSIZE)
13188 {
13189 cbParm = 4;
13190 uMask = 0xffffffff;
13191 }
13192 else
13193 {
13194 cbParm = 2;
13195 uMask = 0xffff;
13196 }
13197
13198 /* Get the stack pointer & push the contents of eflags onto the stack. */
13199 RTGCPTR GCPtrStack = 0;
13200 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
13201 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
13202 if (RT_FAILURE(rc))
13203 {
13204 rc = VERR_EM_INTERPRETER;
13205 break;
13206 }
13207 X86EFLAGS Eflags = pMixedCtx->eflags;
13208 /* The RF & VM bits are cleared in the image stored on the stack; see the Intel instruction reference for PUSHF. */
13209 Eflags.Bits.u1RF = 0;
13210 Eflags.Bits.u1VM = 0;
13211
13212 rc = VBOXSTRICTRC_TODO(PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm, PGMACCESSORIGIN_HM));
13213 if (RT_UNLIKELY(rc != VINF_SUCCESS))
13214 {
13215 AssertMsgFailed(("%Rrc\n", rc)); /** @todo allow strict return codes here */
13216 rc = VERR_EM_INTERPRETER;
13217 break;
13218 }
13219 Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
13220 pMixedCtx->esp -= cbParm;
13221 pMixedCtx->esp &= uMask;
13222 pMixedCtx->rip += pDis->cbInstr;
13223 pMixedCtx->eflags.Bits.u1RF = 0;
13224 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP
13225 | HM_CHANGED_GUEST_RSP
13226 | HM_CHANGED_GUEST_RFLAGS);
13227 if ( !fDbgStepping
13228 && pMixedCtx->eflags.Bits.u1TF)
13229 {
13230 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13231 AssertRCReturn(rc, rc);
13232 }
13233 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
13234 break;
13235 }
13236
13237 case OP_IRET:
13238 {
13239 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
13240 * instruction reference. */
13241 RTGCPTR GCPtrStack = 0;
13242 uint32_t uMask = 0xffff;
13243 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF);
13244 uint16_t aIretFrame[3];
13245 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
13246 {
13247 rc = VERR_EM_INTERPRETER;
13248 break;
13249 }
13250 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
13251 &GCPtrStack);
13252 if (RT_SUCCESS(rc))
13253 {
13254 rc = VBOXSTRICTRC_TODO(PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame),
13255 PGMACCESSORIGIN_HM));
13256 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc)); /** @todo allow strict return codes here */
13257 }
13258 if (RT_FAILURE(rc))
13259 {
13260 rc = VERR_EM_INTERPRETER;
13261 break;
13262 }
13263 pMixedCtx->eip = 0;
13264 pMixedCtx->ip = aIretFrame[0];
13265 pMixedCtx->cs.Sel = aIretFrame[1];
13266 pMixedCtx->cs.ValidSel = aIretFrame[1];
13267 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
13268 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF))
13269 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
13270 pMixedCtx->sp += sizeof(aIretFrame);
13271 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP
13272 | HM_CHANGED_GUEST_CS
13273 | HM_CHANGED_GUEST_RSP
13274 | HM_CHANGED_GUEST_RFLAGS);
13275 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. */
13276 if ( !fDbgStepping
13277 && fGstStepping)
13278 {
13279 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx);
13280 AssertRCReturn(rc, rc);
13281 }
13282 Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
13283 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
13284 break;
13285 }
13286
13287 case OP_INT:
13288 {
13289 uint16_t uVector = pDis->Param1.uValue & 0xff;
13290 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
13291 /* INT clears EFLAGS.TF, we must not set any pending debug exceptions here. */
13292 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
13293 break;
13294 }
13295
13296 case OP_INTO:
13297 {
13298 if (pMixedCtx->eflags.Bits.u1OF)
13299 {
13300 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
13301 /* INTO clears EFLAGS.TF, we must not set any pending debug exceptions here. */
13302 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
13303 }
13304 else
13305 {
13306 pMixedCtx->eflags.Bits.u1RF = 0;
13307 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
13308 }
13309 break;
13310 }
13311
13312 default:
13313 {
13314 pMixedCtx->eflags.Bits.u1RF = 0; /* This is correct most of the time... */
13315 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
13316 EMCODETYPE_SUPERVISOR);
13317 rc = VBOXSTRICTRC_VAL(rc2);
13318 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13319 /** @todo We have to set pending-debug exceptions here when the guest is
13320 * single-stepping depending on the instruction that was interpreted. */
13321 Log4Func(("#GP rc=%Rrc\n", rc));
13322 break;
13323 }
13324 }
13325 }
13326 else
13327 rc = VERR_EM_INTERPRETER;
13328
13329 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
13330 ("#GP Unexpected rc=%Rrc\n", rc));
13331 return rc;
13332}
13333
13334
13335/**
13336 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
13337 * the exception reported in the VMX transient structure back into the VM.
13338 *
13339 * @remarks Requires uExitIntInfo in the VMX transient structure to be
13340 * up-to-date.
13341 */
13342static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13343{
13344 RT_NOREF_PV(pMixedCtx);
13345 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13346#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
13347 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active,
13348 ("uVector=%#x u32XcptBitmap=%#X32\n",
13349 VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.u32XcptBitmap));
13350#endif
13351
13352 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
13353 hmR0VmxCheckExitDueToEventDelivery(). */
13354 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13355 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13356 AssertRCReturn(rc, rc);
13357 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
13358
13359#ifdef DEBUG_ramshankar
13360 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS
13361 | CPUMCTX_EXTRN_RIP);
13362 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo);
13363 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip));
13364#endif
13365
13366 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13367 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
13368 return VINF_SUCCESS;
13369}
13370
13371
13372/**
13373 * VM-exit exception handler for \#PF (Page-fault exception).
13374 */
13375static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
13376{
13377 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
13378 PVM pVM = pVCpu->CTX_SUFF(pVM);
13379 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
13380 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13381 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13382 AssertRCReturn(rc, rc);
13383
13384 if (!pVM->hm.s.fNestedPaging)
13385 { /* likely */ }
13386 else
13387 {
13388#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
13389 Assert(pVCpu->hm.s.fUsingDebugLoop);
13390#endif
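 /* With nested paging we normally don't intercept #PF; we only get here in the debug loop or with the
    always-trap build options, so simply reflect the fault (or a #DF when vectoring) back to the guest. */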
13391 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
13392 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
13393 {
13394 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13395 0 /* cbInstr */, pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQualification);
13396 }
13397 else
13398 {
13399 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13400 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
13401 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
13402 }
13403 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13404 return rc;
13405 }
13406
13407 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
13408 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
13409 if (pVmxTransient->fVectoringPF)
13410 {
13411 Assert(pVCpu->hm.s.Event.fPending);
13412 return VINF_EM_RAW_INJECT_TRPM_EVENT;
13413 }
13414
13415 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
13416 AssertRCReturn(rc, rc);
13417
13418 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
13419 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));
13420
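 /* Forward the #PF to PGM: it syncs the shadow page tables or handles MMIO, and returns
    VINF_EM_RAW_GUEST_TRAP when the fault is genuinely the guest's to handle. */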
13421 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
13422 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pMixedCtx),
13423 (RTGCPTR)pVmxTransient->uExitQualification);
13424
13425 Log4Func(("#PF: rc=%Rrc\n", rc));
13426 if (rc == VINF_SUCCESS)
13427 {
13428 /*
13429 * This is typically a shadow page table sync or a MMIO instruction. But we may have
13430 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
13431 */
13432 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13433 TRPMResetTrap(pVCpu);
13434 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
13435 return rc;
13436 }
13437
13438 if (rc == VINF_EM_RAW_GUEST_TRAP)
13439 {
13440 if (!pVmxTransient->fVectoringDoublePF)
13441 {
13442 /* It's a guest page fault and needs to be reflected to the guest. */
13443 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
13444 TRPMResetTrap(pVCpu);
13445 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
13446 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
13447 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
13448 }
13449 else
13450 {
13451 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13452 TRPMResetTrap(pVCpu);
13453 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
13454 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
13455 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
13456 }
13457
13458 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13459 return VINF_SUCCESS;
13460 }
13461
13462 TRPMResetTrap(pVCpu);
13463 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
13464 return rc;
13465}
13466
13467/** @} */
13468