source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@78961

Last change on this file since 78961 was 78961, checked in by vboxsync, 6 years ago

VMM/HMVMXR0: Nested VMX: bugref:9180 Comments.

1/* $Id: HMVMXR0.cpp 78961 2019-06-04 10:48:07Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/x86.h>
25#include <iprt/asm-amd64-x86.h>
26#include <iprt/thread.h>
27
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/gim.h>
36#include <VBox/vmm/apic.h>
37#ifdef VBOX_WITH_REM
38# include <VBox/vmm/rem.h>
39#endif
40#include "HMInternal.h"
41#include <VBox/vmm/vm.h>
42#include <VBox/vmm/hmvmxinline.h>
43#include "HMVMXR0.h"
44#include "dtrace/VBoxVMM.h"
45
46#ifdef DEBUG_ramshankar
47# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
48# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
49# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
50# define HMVMX_ALWAYS_CHECK_GUEST_STATE
51# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
52# define HMVMX_ALWAYS_TRAP_PF
53# define HMVMX_ALWAYS_FLUSH_TLB
54# define HMVMX_ALWAYS_SWAP_EFER
55#endif
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61/** Use the function table. */
62#define HMVMX_USE_FUNCTION_TABLE
63
64/** Determine which tagged-TLB flush handler to use. */
65#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
66#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
67#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
68#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
69
70/** @name HMVMX_READ_XXX
71 * Flags to skip redundant reads of some common VMCS fields that are not part of
72 * the guest-CPU or VCPU state but are needed while handling VM-exits.
73 */
74#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
75#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
76#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
77#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
78#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
79#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
80#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
81#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
82/** @} */
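/* Illustrative usage (see the hmR0VmxReadXxxVmcs helpers further down): each helper tests
 * pVmxTransient->fVmcsFieldsRead for its HMVMX_READ_XXX bit before issuing a VMREAD and
 * sets the bit afterwards, e.g.
 *
 *     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
 *     { VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); ... }
 *
 * so repeated requests for the same field within a single VM-exit cost only a flag test. */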
83
84/**
85 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
86 * guest using hardware-assisted VMX.
87 *
88 * This excludes state like GPRs (other than RSP) which are always swapped
89 * and restored across the world-switch, and also MSRs like EFER which cannot
90 * be modified by the guest without causing a VM-exit.
91 */
92#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
93 | CPUMCTX_EXTRN_RFLAGS \
94 | CPUMCTX_EXTRN_RSP \
95 | CPUMCTX_EXTRN_SREG_MASK \
96 | CPUMCTX_EXTRN_TABLE_MASK \
97 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
98 | CPUMCTX_EXTRN_SYSCALL_MSRS \
99 | CPUMCTX_EXTRN_SYSENTER_MSRS \
100 | CPUMCTX_EXTRN_TSC_AUX \
101 | CPUMCTX_EXTRN_OTHER_MSRS \
102 | CPUMCTX_EXTRN_CR0 \
103 | CPUMCTX_EXTRN_CR3 \
104 | CPUMCTX_EXTRN_CR4 \
105 | CPUMCTX_EXTRN_DR7 \
106 | CPUMCTX_EXTRN_HM_VMX_MASK)
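/* A sketch of how this mask is typically consumed (an assumption based on the forward
 * declaration of hmR0VmxImportGuestState() below, not a call site copied from this file):
 *
 *     int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
 *
 * imports the full set listed above, whereas a VM-exit handler that needs only a few
 * registers passes a narrower CPUMCTX_EXTRN_XXX mask to avoid unnecessary VMREADs. */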
107
108/**
109 * Exception bitmap mask for real-mode guests (real-on-v86).
110 *
111 * We need to intercept all exceptions manually, except:
112 * - \#AC and \#DB, which are always intercepted anyway to prevent the CPU from
113 * deadlocking due to bugs in Intel CPUs.
114 * - \#PF, which need not be intercepted even in real-mode if we have nested
115 * paging support.
116 */
117#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
118 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
119 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
120 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
121 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
122 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
123 | RT_BIT(X86_XCPT_XF))
124
125/** Maximum VM-instruction error number. */
126#define HMVMX_INSTR_ERROR_MAX 28
127
128/** Profiling macro. */
129#ifdef HM_PROFILE_EXIT_DISPATCH
130# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
131# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
132#else
133# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
134# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
135#endif
136
137/** Assert that preemption is disabled or covered by thread-context hooks. */
138#define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
139 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
140
141/** Assert that we haven't migrated CPUs when thread-context hooks are not
142 * used. */
143#define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
144 || (a_pVCpu)->hm.s.idEnteredCpu == RTMpCpuId(), \
145 ("Illegal migration! Entered on CPU %u Current %u\n", \
146 (a_pVCpu)->hm.s.idEnteredCpu, RTMpCpuId()))
147
148/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
149 * context. */
150#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
151 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
152 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
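/* Usage sketch for the assertion above (hypothetical call, not taken from this file): a
 * VM-exit handler that relies on RIP and CR0 having been imported from the VMCS would do
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CR0);
 *
 * which fires if any of those bits are still set in fExtrn, i.e. the state has not yet
 * been imported into the guest-CPU context. */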
153
154/** Helper macro for VM-exit handlers called unexpectedly. */
155#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
156 do { \
157 (a_pVCpu)->hm.s.u32HMError = (a_HmError); \
158 return VERR_VMX_UNEXPECTED_EXIT; \
159 } while (0)
160
161#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
162/** Macro that performs the necessary privilege checks and handles intercepted
163 * VM-exits for guests that attempted to execute a VMX instruction. */
164# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
165 do \
166 { \
167 VBOXSTRICTRC rcStrictTmp = hmR0VmxCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
168 if (rcStrictTmp == VINF_SUCCESS) \
169 { /* likely */ } \
170 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
171 { \
172 Assert((a_pVCpu)->hm.s.Event.fPending); \
173 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
174 return VINF_SUCCESS; \
175 } \
176 else \
177 { \
178 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
179 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
180 } \
181 } while (0)
182
183/** Macro that decodes a memory operand for an instruction VM-exit. */
184# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
185 do \
186 { \
187 VBOXSTRICTRC rcStrictTmp = hmR0VmxDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
188 (a_pGCPtrEffAddr)); \
189 if (rcStrictTmp == VINF_SUCCESS) \
190 { /* likely */ } \
191 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
192 { \
193 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
194 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
195 NOREF(uXcptTmp); \
196 return VINF_SUCCESS; \
197 } \
198 else \
199 { \
200 Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
201 return rcStrictTmp; \
202 } \
203 } while (0)
204
205#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
206
207
208/*********************************************************************************************************************************
209* Structures and Typedefs *
210*********************************************************************************************************************************/
211/**
212 * VMX transient state.
213 *
214 * A state structure for holding miscellaneous information across
215 * VMX non-root operation, restored after the transition.
216 */
217typedef struct VMXTRANSIENT
218{
219 /** The host's rflags/eflags. */
220 RTCCUINTREG fEFlags;
221#if HC_ARCH_BITS == 32
222 uint32_t u32Alignment0;
223#endif
224 /** The guest's TPR value used for TPR shadowing. */
225 uint8_t u8GuestTpr;
226 /** Alignment. */
227 uint8_t abAlignment0[7];
228
229 /** The basic VM-exit reason. */
230 uint16_t uExitReason;
231 /** Alignment. */
232 uint16_t u16Alignment0;
233 /** The VM-exit interruption error code. */
234 uint32_t uExitIntErrorCode;
236 /** The VM-exit qualification. */
236 uint64_t uExitQual;
237 /** The Guest-linear address. */
238 uint64_t uGuestLinearAddr;
239
240 /** The VM-exit interruption-information field. */
241 uint32_t uExitIntInfo;
242 /** The VM-exit instruction-length field. */
243 uint32_t cbInstr;
244 /** The VM-exit instruction-information field. */
245 VMXEXITINSTRINFO ExitInstrInfo;
246 /** Whether the VM-entry failed or not. */
247 bool fVMEntryFailed;
248 /** Whether we are currently executing a nested-guest. */
249 bool fIsNestedGuest;
250 /** Alignment. */
251 uint8_t abAlignment1[2];
252
253 /** The VM-entry interruption-information field. */
254 uint32_t uEntryIntInfo;
255 /** The VM-entry exception error code field. */
256 uint32_t uEntryXcptErrorCode;
257 /** The VM-entry instruction length field. */
258 uint32_t cbEntryInstr;
259
260 /** IDT-vectoring information field. */
261 uint32_t uIdtVectoringInfo;
262 /** IDT-vectoring error code. */
263 uint32_t uIdtVectoringErrorCode;
264
265 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
266 uint32_t fVmcsFieldsRead;
267
268 /** Whether the guest debug state was active at the time of VM-exit. */
269 bool fWasGuestDebugStateActive;
270 /** Whether the hyper debug state was active at the time of VM-exit. */
271 bool fWasHyperDebugStateActive;
272 /** Whether TSC-offsetting and VMX-preemption timer was updated before VM-entry. */
273 bool fUpdatedTscOffsettingAndPreemptTimer;
274 /** Whether the VM-exit was caused by a page-fault during delivery of a
275 * contributory exception or a page-fault. */
276 bool fVectoringDoublePF;
277 /** Whether the VM-exit was caused by a page-fault during delivery of an
278 * external interrupt or NMI. */
279 bool fVectoringPF;
280 bool afAlignment0[3];
281
282 /** The VMCS info. object. */
283 PVMXVMCSINFO pVmcsInfo;
284} VMXTRANSIENT;
285AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
286AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
287AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
288AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
289AssertCompileMemberAlignment(VMXTRANSIENT, pVmcsInfo, sizeof(uint64_t));
290AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
291/** Pointer to VMX transient state. */
292typedef VMXTRANSIENT *PVMXTRANSIENT;
293
294/**
295 * Memory operand read or write access.
296 */
297typedef enum VMXMEMACCESS
298{
299 VMXMEMACCESS_READ = 0,
300 VMXMEMACCESS_WRITE = 1
301} VMXMEMACCESS;
302
303/**
304 * VMX VM-exit handler.
305 *
306 * @returns Strict VBox status code (i.e. informational status codes too).
307 * @param pVCpu The cross context virtual CPU structure.
308 * @param pVmxTransient The VMX-transient structure.
309 */
310#ifndef HMVMX_USE_FUNCTION_TABLE
311typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
312#else
313typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
314/** Pointer to VM-exit handler. */
315typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
316#endif
317
318/**
319 * VMX VM-exit handler, non-strict status code.
320 *
321 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
322 *
323 * @returns VBox status code, no informational status code returned.
324 * @param pVCpu The cross context virtual CPU structure.
325 * @param pVmxTransient The VMX-transient structure.
326 *
327 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
328 * use of that status code will be replaced with VINF_EM_SOMETHING
329 * later when switching over to IEM.
330 */
331#ifndef HMVMX_USE_FUNCTION_TABLE
332typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
333#else
334typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
335#endif
336
337
338/*********************************************************************************************************************************
339* Internal Functions *
340*********************************************************************************************************************************/
341#ifndef HMVMX_USE_FUNCTION_TABLE
342DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
343# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
344# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
345#else
346# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
347# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
348#endif
349#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
350DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
351#endif
352
353static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
354#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
355static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
356#endif
357
358/** @name VM-exit handlers.
359 * @{
360 */
361static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
362static FNVMXEXITHANDLER hmR0VmxExitExtInt;
363static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
364static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
365static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
366static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
367static FNVMXEXITHANDLER hmR0VmxExitCpuid;
368static FNVMXEXITHANDLER hmR0VmxExitGetsec;
369static FNVMXEXITHANDLER hmR0VmxExitHlt;
370static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
371static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
372static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
373static FNVMXEXITHANDLER hmR0VmxExitVmcall;
374#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
375static FNVMXEXITHANDLER hmR0VmxExitVmclear;
376static FNVMXEXITHANDLER hmR0VmxExitVmlaunch;
377static FNVMXEXITHANDLER hmR0VmxExitVmptrld;
378static FNVMXEXITHANDLER hmR0VmxExitVmptrst;
379static FNVMXEXITHANDLER hmR0VmxExitVmread;
380static FNVMXEXITHANDLER hmR0VmxExitVmresume;
381static FNVMXEXITHANDLER hmR0VmxExitVmwrite;
382static FNVMXEXITHANDLER hmR0VmxExitVmxoff;
383static FNVMXEXITHANDLER hmR0VmxExitVmxon;
384static FNVMXEXITHANDLER hmR0VmxExitInvvpid;
385#endif
386static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
387static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
388static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
389static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
390static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
391static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
392static FNVMXEXITHANDLER hmR0VmxExitMwait;
393static FNVMXEXITHANDLER hmR0VmxExitMtf;
394static FNVMXEXITHANDLER hmR0VmxExitMonitor;
395static FNVMXEXITHANDLER hmR0VmxExitPause;
396static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
397static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
398static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
399static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
400static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
401static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
402static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
403static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
404static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
405static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
406static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
407static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUnexpected;
408/** @} */
409
410#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
411/** @name Nested-guest VM-exit handlers.
412 * @{
413 */
414static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmiNested;
415//static FNVMXEXITHANDLER hmR0VmxExitExtIntNested;
416static FNVMXEXITHANDLER hmR0VmxExitTripleFaultNested;
417static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindowNested;
418static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindowNested;
419static FNVMXEXITHANDLER hmR0VmxExitTaskSwitchNested;
420//static FNVMXEXITHANDLER hmR0VmxExitCpuid;
421//static FNVMXEXITHANDLER hmR0VmxExitGetsec;
422static FNVMXEXITHANDLER hmR0VmxExitHltNested;
423//static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
424static FNVMXEXITHANDLER hmR0VmxExitInvlpgNested;
425static FNVMXEXITHANDLER hmR0VmxExitRdpmcNested;
426//static FNVMXEXITHANDLER hmR0VmxExitVmcall;
427//static FNVMXEXITHANDLER hmR0VmxExitVmclear;
428//static FNVMXEXITHANDLER hmR0VmxExitVmlaunch;
429//static FNVMXEXITHANDLER hmR0VmxExitVmptrld;
430//static FNVMXEXITHANDLER hmR0VmxExitVmptrst;
431static FNVMXEXITHANDLER hmR0VmxExitVmreadVmwriteNested;
432//static FNVMXEXITHANDLER hmR0VmxExitVmresume;
433//static FNVMXEXITHANDLER hmR0VmxExitVmwrite;
434//static FNVMXEXITHANDLER hmR0VmxExitVmxoff;
435//static FNVMXEXITHANDLER hmR0VmxExitVmxon;
436//static FNVMXEXITHANDLER hmR0VmxExitInvvpid;
437static FNVMXEXITHANDLER hmR0VmxExitRdtscNested;
438static FNVMXEXITHANDLER hmR0VmxExitMovCRxNested;
439static FNVMXEXITHANDLER hmR0VmxExitMovDRxNested;
440static FNVMXEXITHANDLER hmR0VmxExitIoInstrNested;
441static FNVMXEXITHANDLER hmR0VmxExitRdmsrNested;
442static FNVMXEXITHANDLER hmR0VmxExitWrmsrNested;
443static FNVMXEXITHANDLER hmR0VmxExitMwaitNested;
444static FNVMXEXITHANDLER hmR0VmxExitMtfNested;
445static FNVMXEXITHANDLER hmR0VmxExitMonitorNested;
446static FNVMXEXITHANDLER hmR0VmxExitPauseNested;
447static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThresholdNested;
448static FNVMXEXITHANDLER hmR0VmxExitApicAccessNested;
449static FNVMXEXITHANDLER hmR0VmxExitApicWriteNested;
450static FNVMXEXITHANDLER hmR0VmxExitVirtEoiNested;
451//static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
452//static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
453static FNVMXEXITHANDLER hmR0VmxExitRdtscpNested;
454//static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
455static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvdNested;
456//static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
457//static FNVMXEXITHANDLER hmR0VmxExitErrUnexpected;
458static FNVMXEXITHANDLER hmR0VmxExitInvpcidNested;
459//static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
460//static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
461//static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUnexpected;
462static FNVMXEXITHANDLER hmR0VmxExitInstrNested;
463static FNVMXEXITHANDLER hmR0VmxExitInstrWithInfoNested;
464/** @} */
465#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
466
467/** @name Helpers for hardware exceptions VM-exit handlers.
468 * @{
469 */
470static VBOXSTRICTRC hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
471static VBOXSTRICTRC hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
472static VBOXSTRICTRC hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
473static VBOXSTRICTRC hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
474static VBOXSTRICTRC hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
475static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
476static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
477static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst);
478static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr);
479static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
480static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg);
481static VBOXSTRICTRC hmR0VmxExitHostNmi(PVMCPU pVCpu);
482/** @} */
483
484
485/*********************************************************************************************************************************
486* Global Variables *
487*********************************************************************************************************************************/
488#ifdef VMX_USE_CACHED_VMCS_ACCESSES
489static const uint32_t g_aVmcsCacheSegBase[] =
490{
491 VMX_VMCS_GUEST_ES_BASE_CACHE_IDX,
492 VMX_VMCS_GUEST_CS_BASE_CACHE_IDX,
493 VMX_VMCS_GUEST_SS_BASE_CACHE_IDX,
494 VMX_VMCS_GUEST_DS_BASE_CACHE_IDX,
495 VMX_VMCS_GUEST_FS_BASE_CACHE_IDX,
496 VMX_VMCS_GUEST_GS_BASE_CACHE_IDX
497};
498AssertCompile(RT_ELEMENTS(g_aVmcsCacheSegBase) == X86_SREG_COUNT);
499#endif
500static const uint32_t g_aVmcsSegBase[] =
501{
502 VMX_VMCS_GUEST_ES_BASE,
503 VMX_VMCS_GUEST_CS_BASE,
504 VMX_VMCS_GUEST_SS_BASE,
505 VMX_VMCS_GUEST_DS_BASE,
506 VMX_VMCS_GUEST_FS_BASE,
507 VMX_VMCS_GUEST_GS_BASE
508};
509static const uint32_t g_aVmcsSegSel[] =
510{
511 VMX_VMCS16_GUEST_ES_SEL,
512 VMX_VMCS16_GUEST_CS_SEL,
513 VMX_VMCS16_GUEST_SS_SEL,
514 VMX_VMCS16_GUEST_DS_SEL,
515 VMX_VMCS16_GUEST_FS_SEL,
516 VMX_VMCS16_GUEST_GS_SEL
517};
518static const uint32_t g_aVmcsSegLimit[] =
519{
520 VMX_VMCS32_GUEST_ES_LIMIT,
521 VMX_VMCS32_GUEST_CS_LIMIT,
522 VMX_VMCS32_GUEST_SS_LIMIT,
523 VMX_VMCS32_GUEST_DS_LIMIT,
524 VMX_VMCS32_GUEST_FS_LIMIT,
525 VMX_VMCS32_GUEST_GS_LIMIT
526};
527static const uint32_t g_aVmcsSegAttr[] =
528{
529 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
530 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
531 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
532 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
533 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
534 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
535};
536AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
537AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
538AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
539AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
540
541#ifdef HMVMX_USE_FUNCTION_TABLE
542/**
543 * VMX_EXIT dispatch table.
544 */
545static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
546{
547 /* 0 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
548 /* 1 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
549 /* 2 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
550 /* 3 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitErrUnexpected,
551 /* 4 VMX_EXIT_SIPI */ hmR0VmxExitErrUnexpected,
552 /* 5 VMX_EXIT_IO_SMI */ hmR0VmxExitErrUnexpected,
553 /* 6 VMX_EXIT_SMI */ hmR0VmxExitErrUnexpected,
554 /* 7 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
555 /* 8 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
556 /* 9 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
557 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
558 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
559 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
560 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
561 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
562 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
563 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
564 /* 17 VMX_EXIT_RSM */ hmR0VmxExitErrUnexpected,
565 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
566#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
567 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitVmclear,
568 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitVmlaunch,
569 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitVmptrld,
570 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitVmptrst,
571 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitVmread,
572 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitVmresume,
573 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitVmwrite,
574 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitVmxoff,
575 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitVmxon,
576#else
577 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
578 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
579 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
580 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
581 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
582 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
583 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
584 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
585 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
586#endif
587 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
588 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
589 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
590 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
591 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
592 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
593 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrUnexpected,
594 /* 35 UNDEFINED */ hmR0VmxExitErrUnexpected,
595 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
596 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
597 /* 38 UNDEFINED */ hmR0VmxExitErrUnexpected,
598 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
599 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
600 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrUnexpected,
601 /* 42 UNDEFINED */ hmR0VmxExitErrUnexpected,
602 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
603 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
604 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ hmR0VmxExitErrUnexpected,
605 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ hmR0VmxExitErrUnexpected,
606 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ hmR0VmxExitErrUnexpected,
607 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
608 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
609 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
610 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
611 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
612#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
613 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitInvvpid,
614#else
615 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
616#endif
617 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
618 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
619 /* 56 VMX_EXIT_APIC_WRITE */ hmR0VmxExitErrUnexpected,
620 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitErrUnexpected,
621 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
622 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitErrUnexpected,
623 /* 60 VMX_EXIT_ENCLS */ hmR0VmxExitErrUnexpected,
624 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUnexpected,
625 /* 62 VMX_EXIT_PML_FULL */ hmR0VmxExitErrUnexpected,
626 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitErrUnexpected,
627 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitErrUnexpected,
628 /* 65 UNDEFINED */ hmR0VmxExitErrUnexpected,
629 /* 66 VMX_EXIT_SPP_EVENT */ hmR0VmxExitErrUnexpected,
630 /* 67 VMX_EXIT_UMWAIT */ hmR0VmxExitErrUnexpected,
631 /* 68 VMX_EXIT_TPAUSE */ hmR0VmxExitErrUnexpected,
632};
633#endif /* HMVMX_USE_FUNCTION_TABLE */
634
635#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
636static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
637{
638 /* 0 */ "(Not Used)",
639 /* 1 */ "VMCALL executed in VMX root operation.",
640 /* 2 */ "VMCLEAR with invalid physical address.",
641 /* 3 */ "VMCLEAR with VMXON pointer.",
642 /* 4 */ "VMLAUNCH with non-clear VMCS.",
643 /* 5 */ "VMRESUME with non-launched VMCS.",
644 /* 6 */ "VMRESUME after VMXOFF",
645 /* 7 */ "VM-entry with invalid control fields.",
646 /* 8 */ "VM-entry with invalid host state fields.",
647 /* 9 */ "VMPTRLD with invalid physical address.",
648 /* 10 */ "VMPTRLD with VMXON pointer.",
649 /* 11 */ "VMPTRLD with incorrect revision identifier.",
650 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
651 /* 13 */ "VMWRITE to read-only VMCS component.",
652 /* 14 */ "(Not Used)",
653 /* 15 */ "VMXON executed in VMX root operation.",
654 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
655 /* 17 */ "VM-entry with non-launched executing VMCS.",
656 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
657 /* 19 */ "VMCALL with non-clear VMCS.",
658 /* 20 */ "VMCALL with invalid VM-exit control fields.",
659 /* 21 */ "(Not Used)",
660 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
661 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
662 /* 24 */ "VMCALL with invalid SMM-monitor features.",
663 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
664 /* 26 */ "VM-entry with events blocked by MOV SS.",
665 /* 27 */ "(Not Used)",
666 /* 28 */ "Invalid operand to INVEPT/INVVPID."
667};
668#endif /* VBOX_STRICT */
669
670
671/**
672 * Get the CR0 guest/host mask that does not change through the lifetime of a VM.
673 *
674 * Any bit set in this mask is owned by the host/hypervisor and would cause a
675 * VM-exit when modified by the guest.
676 *
677 * @returns The static CR0 guest/host mask.
678 * @param pVCpu The cross context virtual CPU structure.
679 */
680DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr0Mask(PCVMCPU pVCpu)
681{
682 /*
683 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW),
684 * and to CR0 bits that we require for shadow paging (PG), must cause VM-exits.
685 */
686 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
687 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
688 * and @bugref{6944}. */
689 PVM pVM = pVCpu->CTX_SUFF(pVM);
690 return ( X86_CR0_PE
691 | X86_CR0_NE
692 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
693 | X86_CR0_PG
694 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
695 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
696 | X86_CR0_NW); /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
697}
698
699
700/**
701 * Gets the CR4 guest/host mask that does not change through the lifetime of a VM.
702 *
703 * Any bit set in this mask is owned by the host/hypervisor and would cause a
704 * VM-exit when modified by the guest.
705 *
706 * @returns The static CR4 guest/host mask.
707 * @param pVCpu The cross context virtual CPU structure.
708 */
709DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr4Mask(PCVMCPU pVCpu)
710{
711 /*
712 * We need to look at the host features here (for e.g. OSXSAVE, PCID) because
713 * these bits are reserved on hardware that does not support them. Since the
714 * CPU cannot refer to our virtual CPUID, we need to intercept CR4 changes to
715 * these bits and handle it depending on whether we expose them to the guest.
716 */
717 PVM pVM = pVCpu->CTX_SUFF(pVM);
718 bool const fXSaveRstor = pVM->cpum.ro.HostFeatures.fXSaveRstor;
719 bool const fPcid = pVM->cpum.ro.HostFeatures.fPcid;
720 return ( X86_CR4_VMXE
721 | X86_CR4_VME
722 | X86_CR4_PAE
723 | X86_CR4_PGE
724 | X86_CR4_PSE
725 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
726 | (fPcid ? X86_CR4_PCIDE : 0));
727}
728
729
730/**
731 * Returns whether the VM-exit MSR-store area differs from the VM-exit MSR-load
732 * area.
733 *
734 * @returns @c true if it's different, @c false otherwise.
735 * @param pVmcsInfo The VMCS info. object.
736 */
737DECL_FORCE_INLINE(bool) hmR0VmxIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo)
738{
739 return RT_BOOL( pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad
740 && pVmcsInfo->pvGuestMsrStore);
741}
742
743
744/**
745 * Adds one or more exceptions to the exception bitmap and commits it to the current
746 * VMCS.
747 *
748 * @returns VBox status code.
749 * @param pVmxTransient The VMX-transient structure.
750 * @param uXcptMask The exception(s) to add.
751 */
752static int hmR0VmxAddXcptInterceptMask(PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
753{
754 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
755 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
756 if ((uXcptBitmap & uXcptMask) != uXcptMask)
757 {
758 uXcptBitmap |= uXcptMask;
759 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
760 AssertRCReturn(rc, rc);
761 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
762 }
763 return VINF_SUCCESS;
764}
765
766
767/**
768 * Adds an exception to the exception bitmap and commits it to the current VMCS.
769 *
770 * @returns VBox status code.
771 * @param pVmxTransient The VMX-transient structure.
772 * @param uXcpt The exception to add.
773 */
774static int hmR0VmxAddXcptIntercept(PVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
775{
776 Assert(uXcpt <= X86_XCPT_LAST);
777 return hmR0VmxAddXcptInterceptMask(pVmxTransient, RT_BIT_32(uXcpt));
778}
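/* For instance (a usage sketch, not a call site copied from this file):
 *
 *     int rc = hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_GP);
 *
 * sets the \#GP bit in the cached exception bitmap and, via hmR0VmxAddXcptInterceptMask()
 * above, performs the VMWRITE only when the bitmap actually changes. */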
779
780
781/**
782 * Removes one or more exceptions from the exception bitmap and commits it to the
783 * current VMCS.
784 *
785 * This takes care of not removing the exception intercept if a nested-guest
786 * requires the exception to be intercepted.
787 *
788 * @returns VBox status code.
789 * @param pVCpu The cross context virtual CPU structure.
790 * @param pVmxTransient The VMX-transient structure.
791 * @param uXcptMask The exception(s) to remove.
792 */
793static int hmR0VmxRemoveXcptInterceptMask(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
794{
795 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
796 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
797 if (u32XcptBitmap & uXcptMask)
798 {
799#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
800 if (!pVmxTransient->fIsNestedGuest)
801 { /* likely */ }
802 else
803 {
804 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
805 uXcptMask &= ~pVmcsNstGst->u32XcptBitmap;
806 }
807#endif
808#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
809 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
810 | RT_BIT(X86_XCPT_DE)
811 | RT_BIT(X86_XCPT_NM)
812 | RT_BIT(X86_XCPT_TS)
813 | RT_BIT(X86_XCPT_UD)
814 | RT_BIT(X86_XCPT_NP)
815 | RT_BIT(X86_XCPT_SS)
816 | RT_BIT(X86_XCPT_GP)
817 | RT_BIT(X86_XCPT_PF)
818 | RT_BIT(X86_XCPT_MF));
819#elif defined(HMVMX_ALWAYS_TRAP_PF)
820 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
821#endif
822 if (uXcptMask)
823 {
824 /* Validate we are not removing any essential exception intercepts. */
825 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF))); RT_NOREF(pVCpu);
826 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
827 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
828
829 /* Remove it from the exception bitmap. */
830 u32XcptBitmap &= ~uXcptMask;
831
832 /* Commit and update the cache if necessary. */
833 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
834 {
835 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
836 AssertRCReturn(rc, rc);
837 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
838 }
839 }
840 }
841 return VINF_SUCCESS;
842}
843
844
845/**
846 * Removes an exception from the exception bitmap and commits it to the current
847 * VMCS.
848 *
849 * @returns VBox status code.
850 * @param pVCpu The cross context virtual CPU structure.
851 * @param pVmxTransient The VMX-transient structure.
852 * @param uXcpt The exception to remove.
853 */
854static int hmR0VmxRemoveXcptIntercept(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
855{
856 return hmR0VmxRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
857}
858
859
860/**
861 * Loads the VMCS specified by the VMCS info. object.
862 *
863 * @returns VBox status code.
864 * @param pVmcsInfo The VMCS info. object.
865 */
866static int hmR0VmxLoadVmcs(PVMXVMCSINFO pVmcsInfo)
867{
868 Assert(pVmcsInfo);
869 Assert(pVmcsInfo->HCPhysVmcs);
870 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
871
872 if (pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR)
873 {
874 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
875 if (RT_SUCCESS(rc))
876 {
877 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
878 return VINF_SUCCESS;
879 }
880 return rc;
881 }
882 return VERR_VMX_INVALID_VMCS_LAUNCH_STATE;
883}
884
885
886/**
887 * Clears the VMCS specified by the VMCS info. object.
888 *
889 * @returns VBox status code.
890 * @param pVmcsInfo The VMCS info. object.
891 */
892static int hmR0VmxClearVmcs(PVMXVMCSINFO pVmcsInfo)
893{
894 Assert(pVmcsInfo);
895 Assert(pVmcsInfo->HCPhysVmcs);
896 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
897
898 int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs);
899 if (RT_SUCCESS(rc))
900 pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
901 return rc;
902}
903
904
905#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
906/**
907 * Switches the current VMCS to the one specified.
908 *
909 * @returns VBox status code.
910 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
911 * @param pVmcsInfoTo The VMCS info. object we are switching to.
912 *
913 * @remarks Called with interrupts disabled.
914 */
915static int hmR0VmxSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
916{
917 Assert(pVmcsInfoFrom);
918 Assert(pVmcsInfoTo);
919
920 /*
921 * Clear the VMCS we are switching out if it has not already been cleared.
922 * This will sync any CPU internal data back to the VMCS.
923 */
924 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
925 {
926 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
927 if (RT_SUCCESS(rc))
928 { /* likely */ }
929 else
930 return rc;
931 }
932
933 /*
934 * Clear the VMCS we are switching to if it has not already been cleared.
935 * This will initialize the VMCS launch state to "clear" required for loading it.
936 *
937 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
938 */
939 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
940 {
941 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
942 if (RT_SUCCESS(rc))
943 { /* likely */ }
944 else
945 return rc;
946 }
947
948 /*
949 * Finally, load the VMCS we are switching to.
950 */
951 return hmR0VmxLoadVmcs(pVmcsInfoTo);
952}
953#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
954
955
956/**
957 * Updates the VM's last error record.
958 *
959 * If there was a VMX instruction error, reads the error data from the VMCS and
960 * updates VCPU's last error record as well.
961 *
962 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
963 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
964 * VERR_VMX_INVALID_VMCS_FIELD.
965 * @param rc The error code.
966 */
967static void hmR0VmxUpdateErrorRecord(PVMCPU pVCpu, int rc)
968{
969 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
970 || rc == VERR_VMX_UNABLE_TO_START_VM)
971 {
972 AssertPtrReturnVoid(pVCpu);
973 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
974 }
975 pVCpu->CTX_SUFF(pVM)->hm.s.rcInit = rc;
976}
977
978
979#ifdef VBOX_STRICT
980/**
981 * Reads the VM-entry interruption-information field from the VMCS into the VMX
982 * transient structure.
983 *
984 * @returns VBox status code.
985 * @param pVmxTransient The VMX-transient structure.
986 *
987 * @remarks No-long-jump zone!!!
988 */
989DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
990{
991 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
992 AssertRCReturn(rc, rc);
993 return VINF_SUCCESS;
994}
995
996
997/**
998 * Reads the VM-entry exception error code field from the VMCS into
999 * the VMX transient structure.
1000 *
1001 * @returns VBox status code.
1002 * @param pVmxTransient The VMX-transient structure.
1003 *
1004 * @remarks No-long-jump zone!!!
1005 */
1006DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
1007{
1008 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1009 AssertRCReturn(rc, rc);
1010 return VINF_SUCCESS;
1011}
1012
1013
1014/**
1015 * Reads the VM-entry instruction length field from the VMCS into
1016 * the VMX transient structure.
1017 *
1018 * @returns VBox status code.
1019 * @param pVmxTransient The VMX-transient structure.
1020 *
1021 * @remarks No-long-jump zone!!!
1022 */
1023DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
1024{
1025 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1026 AssertRCReturn(rc, rc);
1027 return VINF_SUCCESS;
1028}
1029#endif /* VBOX_STRICT */
1030
1031
1032/**
1033 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1034 * transient structure.
1035 *
1036 * @returns VBox status code.
1037 * @param pVmxTransient The VMX-transient structure.
1038 */
1039DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
1040{
1041 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1042 {
1043 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1044 AssertRCReturn(rc,rc);
1045 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1046 }
1047 return VINF_SUCCESS;
1048}
1049
1050
1051/**
1052 * Reads the VM-exit interruption error code from the VMCS into the VMX
1053 * transient structure.
1054 *
1055 * @returns VBox status code.
1056 * @param pVmxTransient The VMX-transient structure.
1057 */
1058DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
1059{
1060 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1061 {
1062 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1063 AssertRCReturn(rc, rc);
1064 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1065 }
1066 return VINF_SUCCESS;
1067}
1068
1069
1070/**
1071 * Reads the VM-exit instruction length field from the VMCS into the VMX
1072 * transient structure.
1073 *
1074 * @returns VBox status code.
1075 * @param pVmxTransient The VMX-transient structure.
1076 */
1077DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
1078{
1079 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1080 {
1081 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
1082 AssertRCReturn(rc, rc);
1083 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1084 }
1085 return VINF_SUCCESS;
1086}
1087
1088
1089/**
1090 * Reads the VM-exit instruction-information field from the VMCS into
1091 * the VMX transient structure.
1092 *
1093 * @returns VBox status code.
1094 * @param pVmxTransient The VMX-transient structure.
1095 */
1096DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
1097{
1098 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1099 {
1100 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1101 AssertRCReturn(rc, rc);
1102 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1103 }
1104 return VINF_SUCCESS;
1105}
1106
1107
1108/**
1109 * Reads the VM-exit Qualification from the VMCS into the VMX transient structure.
1110 *
1111 * @returns VBox status code.
1112 * @param pVCpu The cross context virtual CPU structure of the
1113 * calling EMT. (Required for the VMCS cache case.)
1114 * @param pVmxTransient The VMX-transient structure.
1115 */
1116DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
1117{
1118 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1119 {
1120 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); NOREF(pVCpu);
1121 AssertRCReturn(rc, rc);
1122 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1123 }
1124 return VINF_SUCCESS;
1125}
1126
1127
1128/**
1129 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1130 *
1131 * @returns VBox status code.
1132 * @param pVCpu The cross context virtual CPU structure of the
1133 * calling EMT. (Required for the VMCS cache case.)
1134 * @param pVmxTransient The VMX-transient structure.
1135 */
1136DECLINLINE(int) hmR0VmxReadGuestLinearAddrVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
1137{
1138 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1139 {
1140 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr); NOREF(pVCpu);
1141 AssertRCReturn(rc, rc);
1142 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1143 }
1144 return VINF_SUCCESS;
1145}
1146
1147
1148/**
1149 * Reads the IDT-vectoring information field from the VMCS into the VMX
1150 * transient structure.
1151 *
1152 * @returns VBox status code.
1153 * @param pVmxTransient The VMX-transient structure.
1154 *
1155 * @remarks No-long-jump zone!!!
1156 */
1157DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
1158{
1159 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1160 {
1161 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1162 AssertRCReturn(rc, rc);
1163 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1164 }
1165 return VINF_SUCCESS;
1166}
1167
1168
1169/**
1170 * Reads the IDT-vectoring error code from the VMCS into the VMX
1171 * transient structure.
1172 *
1173 * @returns VBox status code.
1174 * @param pVmxTransient The VMX-transient structure.
1175 */
1176DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
1177{
1178 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1179 {
1180 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1181 AssertRCReturn(rc, rc);
1182 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1183 }
1184 return VINF_SUCCESS;
1185}
1186
1187
1188/**
1189 * Enters VMX root mode operation on the current CPU.
1190 *
1191 * @returns VBox status code.
1192 * @param pVM The cross context VM structure. Can be
1193 * NULL, after a resume.
1194 * @param HCPhysCpuPage Physical address of the VMXON region.
1195 * @param pvCpuPage Pointer to the VMXON region.
1196 */
1197static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
1198{
1199 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
1200 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
1201 Assert(pvCpuPage);
1202 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1203
1204 if (pVM)
1205 {
1206 /* Write the VMCS revision identifier to the VMXON region. */
1207 *(uint32_t *)pvCpuPage = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
1208 }
1209
1210 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
1211 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1212
1213 /* Enable the VMX bit in CR4 if necessary. */
1214 RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
1215
1216 /* Enter VMX root mode. */
1217 int rc = VMXEnable(HCPhysCpuPage);
1218 if (RT_FAILURE(rc))
1219 {
1220 if (!(uOldCr4 & X86_CR4_VMXE))
1221 SUPR0ChangeCR4(0 /* fOrMask */, ~X86_CR4_VMXE);
1222
1223 if (pVM)
1224 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
1225 }
1226
1227 /* Restore interrupts. */
1228 ASMSetFlags(fEFlags);
1229 return rc;
1230}
1231
1232
1233/**
1234 * Exits VMX root mode operation on the current CPU.
1235 *
1236 * @returns VBox status code.
1237 */
1238static int hmR0VmxLeaveRootMode(void)
1239{
1240 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1241
1242 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
1243 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1244
1245 /* If we're for some reason not in VMX root mode, then don't leave it. */
1246 RTCCUINTREG const uHostCR4 = ASMGetCR4();
1247
1248 int rc;
1249 if (uHostCR4 & X86_CR4_VMXE)
1250 {
1251 /* Exit VMX root mode and clear the VMX bit in CR4. */
1252 VMXDisable();
1253 SUPR0ChangeCR4(0 /* fOrMask */, ~X86_CR4_VMXE);
1254 rc = VINF_SUCCESS;
1255 }
1256 else
1257 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
1258
1259 /* Restore interrupts. */
1260 ASMSetFlags(fEFlags);
1261 return rc;
1262}
1263
1264
1265/**
1266 * Allocates and maps a physically contiguous page. The allocated page is
1267 * zero'd out (used by various VT-x structures).
1268 *
1269 * @returns IPRT status code.
1270 * @param pMemObj Pointer to the ring-0 memory object.
1271 * @param ppVirt Where to store the virtual address of the
1272 * allocation.
1273 * @param pHCPhys Where to store the physical address of the
1274 * allocation.
1275 */
1276static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
1277{
1278 AssertPtr(pMemObj);
1279 AssertPtr(ppVirt);
1280 AssertPtr(pHCPhys);
1281 int rc = RTR0MemObjAllocCont(pMemObj, X86_PAGE_4K_SIZE, false /* fExecutable */);
1282 if (RT_FAILURE(rc))
1283 return rc;
1284 *ppVirt = RTR0MemObjAddress(*pMemObj);
1285 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
1286 ASMMemZero32(*ppVirt, X86_PAGE_4K_SIZE);
1287 return VINF_SUCCESS;
1288}
1289
1290
1291/**
1292 * Frees and unmaps an allocated physical page.
1293 *
1294 * @param pMemObj Pointer to the ring-0 memory object.
1295 * @param ppVirt Where to re-initialize the virtual address of
1296 * allocation as 0.
1297 * @param pHCPhys Where to re-initialize the physical address of the
1298 * allocation as 0.
1299 */
1300static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
1301{
1302 AssertPtr(pMemObj);
1303 AssertPtr(ppVirt);
1304 AssertPtr(pHCPhys);
1305 /* NULL is valid, accepted and ignored by the free function below. */
1306 RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
1307 *pMemObj = NIL_RTR0MEMOBJ;
1308 *ppVirt = NULL;
1309 *pHCPhys = NIL_RTHCPHYS;
1310}
1311
1312
1313/**
1314 * Initializes a VMCS info. object.
1315 *
1316 * @param pVmcsInfo The VMCS info. object.
1317 */
1318static void hmR0VmxInitVmcsInfo(PVMXVMCSINFO pVmcsInfo)
1319{
1320 memset(pVmcsInfo, 0, sizeof(*pVmcsInfo));
1321
1322 Assert(pVmcsInfo->hMemObjVmcs == NIL_RTR0MEMOBJ);
1323 Assert(pVmcsInfo->hMemObjMsrBitmap == NIL_RTR0MEMOBJ);
1324 Assert(pVmcsInfo->hMemObjGuestMsrLoad == NIL_RTR0MEMOBJ);
1325 Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
1326 Assert(pVmcsInfo->hMemObjHostMsrLoad == NIL_RTR0MEMOBJ);
1327 pVmcsInfo->HCPhysVmcs = NIL_RTHCPHYS;
1328 pVmcsInfo->HCPhysMsrBitmap = NIL_RTHCPHYS;
1329 pVmcsInfo->HCPhysGuestMsrLoad = NIL_RTHCPHYS;
1330 pVmcsInfo->HCPhysGuestMsrStore = NIL_RTHCPHYS;
1331 pVmcsInfo->HCPhysHostMsrLoad = NIL_RTHCPHYS;
1332 pVmcsInfo->HCPhysVirtApic = NIL_RTHCPHYS;
1333 pVmcsInfo->HCPhysEPTP = NIL_RTHCPHYS;
1334 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
1335}
1336
1337
1338/**
1339 * Frees the VT-x structures for a VMCS info. object.
1340 *
1341 * @param pVM The cross context VM structure.
1342 * @param pVmcsInfo The VMCS info. object.
1343 */
1344static void hmR0VmxFreeVmcsInfo(PVM pVM, PVMXVMCSINFO pVmcsInfo)
1345{
1346 hmR0VmxPageFree(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
1347
1348 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1349 hmR0VmxPageFree(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
1350
1351 hmR0VmxPageFree(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad, &pVmcsInfo->HCPhysHostMsrLoad);
1352 hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad, &pVmcsInfo->HCPhysGuestMsrLoad);
1353 hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrStore, &pVmcsInfo->pvGuestMsrStore, &pVmcsInfo->HCPhysGuestMsrStore);
1354
1355 hmR0VmxInitVmcsInfo(pVmcsInfo);
1356}
1357
1358
1359/**
1360 * Allocates the VT-x structures for a VMCS info. object.
1361 *
1362 * @returns VBox status code.
1363 * @param pVCpu The cross context virtual CPU structure.
1364 * @param pVmcsInfo The VMCS info. object.
1365 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1366 */
1367static int hmR0VmxAllocVmcsInfo(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1368{
1369 PVM pVM = pVCpu->CTX_SUFF(pVM);
1370
1371 /* Allocate the guest VM control structure (VMCS). */
1372 int rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
1373 if (RT_SUCCESS(rc))
1374 {
1375 if (!fIsNstGstVmcs)
1376 {
1377 /* Get the allocated virtual-APIC page from the virtual APIC device. */
1378 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1379 && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
1380 {
1381 rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic,
1382 NULL /* pR3Ptr */, NULL /* pRCPtr */);
1383 }
1384 }
1385 else
1386 {
1387 Assert(pVmcsInfo->HCPhysVirtApic == NIL_RTHCPHYS);
1388 Assert(!pVmcsInfo->pbVirtApic);
1389 }
1390
1391 if (RT_SUCCESS(rc))
1392 {
1393 /*
1394 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
1395 * transparent accesses of specific MSRs.
1396 *
1397 * If the condition for enabling MSR bitmaps changes here, don't forget to
1398 * update HMIsMsrBitmapActive().
1399 *
1400 * We don't share MSR bitmaps between the guest and nested-guest, so that we
1401 * don't need to care about carefully restoring the guest MSR bitmap.
1402 * The guest visible nested-guest MSR bitmap needs to remain unchanged.
1403 * Hence, allocate a separate MSR bitmap for the guest and nested-guest.
1404 */
1405 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1406 {
1407 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
1408 if (RT_SUCCESS(rc))
1409 ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
1410 }
1411
1412 if (RT_SUCCESS(rc))
1413 {
1414 /*
1415 * Allocate the VM-entry MSR-load area for the guest MSRs.
1416 *
1417 * Similar to MSR-bitmaps, we do not share the auto MSR-load/store areas between
1418 * the guest and nested-guest.
1419 */
1420 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad,
1421 &pVmcsInfo->HCPhysGuestMsrLoad);
1422 if (RT_SUCCESS(rc))
1423 {
1424 /*
1425 * We use the same page for VM-entry MSR-load and VM-exit MSR store areas.
1426 * These contain the guest MSRs to load on VM-entry and store on VM-exit.
1427 */
1428 Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
1429 pVmcsInfo->pvGuestMsrStore = pVmcsInfo->pvGuestMsrLoad;
1430 pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
1431
1432 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1433 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad,
1434 &pVmcsInfo->HCPhysHostMsrLoad);
1435 }
1436 }
1437 }
1438 }
1439
1440 return rc;
1441}
1442
1443
1444/**
1445 * Free all VT-x structures for the VM.
1446 *
1447 * @returns IPRT status code.
1448 * @param pVM The cross context VM structure.
1449 */
1450static void hmR0VmxStructsFree(PVM pVM)
1451{
1452#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1453 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
1454#endif
1455 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
1456
1457 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1458 {
1459 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1460 PVMXVMCSINFO pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo;
1461 hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
1462#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1463 if (pVM->cpum.ro.GuestFeatures.fVmx)
1464 {
1465 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
1466 hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
1467 }
1468#endif
1469 }
1470}
1471
1472
1473/**
1474 * Allocate all VT-x structures for the VM.
1475 *
1476 * @returns IPRT status code.
1477 * @param pVM The cross context VM structure.
1478 */
1479static int hmR0VmxStructsAlloc(PVM pVM)
1480{
1481 /*
1482 * Sanity check the VMCS size reported by the CPU as we assume 4KB allocations.
1483 * The VMCS size cannot be more than 4096 bytes.
1484 *
1485 * See Intel spec. Appendix A.1 "Basic VMX Information".
1486 */
1487 uint32_t const cbVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_SIZE);
1488 if (cbVmcs <= X86_PAGE_4K_SIZE)
1489 { /* likely */ }
1490 else
1491 {
1492 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE;
1493 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1494 }
1495
1496 /*
1497 * Initialize/check members up-front so we can cleanup en masse on allocation failures.
1498 */
1499#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1500 Assert(pVM->hm.s.vmx.hMemObjScratch == NIL_RTR0MEMOBJ);
1501 Assert(pVM->hm.s.vmx.pbScratch == NULL);
1502 pVM->hm.s.vmx.HCPhysScratch = NIL_RTHCPHYS;
1503#endif
1504
1505 Assert(pVM->hm.s.vmx.hMemObjApicAccess == NIL_RTR0MEMOBJ);
1506 Assert(pVM->hm.s.vmx.pbApicAccess == NULL);
1507 pVM->hm.s.vmx.HCPhysApicAccess = NIL_RTHCPHYS;
1508
1509 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1510 {
1511 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1512 hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfo);
1513 hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfoNstGst);
1514 }
1515
1516 /*
1517 * Allocate per-VM VT-x structures.
1518 */
1519 int rc = VINF_SUCCESS;
1520#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1521 /* Allocate crash-dump magic scratch page. */
1522 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
1523 if (RT_FAILURE(rc))
1524 {
1525 hmR0VmxStructsFree(pVM);
1526 return rc;
1527 }
1528 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
1529 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
1530#endif
1531
1532 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
1533 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
1534 {
1535 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
1536 &pVM->hm.s.vmx.HCPhysApicAccess);
1537 if (RT_FAILURE(rc))
1538 {
1539 hmR0VmxStructsFree(pVM);
1540 return rc;
1541 }
1542 }
1543
1544 /*
1545 * Initialize per-VCPU VT-x structures.
1546 */
1547 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1548 {
1549 /* Allocate the guest VMCS structures. */
1550 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1551 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
1552 if (RT_SUCCESS(rc))
1553 {
1554#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1555 /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
1556 if (pVM->cpum.ro.GuestFeatures.fVmx)
1557 {
1558 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
1559 if (RT_SUCCESS(rc))
1560 { /* likely */ }
1561 else
1562 break;
1563 }
1564#endif
1565 }
1566 else
1567 break;
1568 }
1569
1570 if (RT_FAILURE(rc))
1571 {
1572 hmR0VmxStructsFree(pVM);
1573 return rc;
1574 }
1575
1576 return VINF_SUCCESS;
1577}
1578
1579
1580#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1581/**
1582 * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
1583 *
1584 * @returns @c true if the MSR is intercepted, @c false otherwise.
1585 * @param pvMsrBitmap The MSR bitmap.
1586 * @param offMsr The MSR byte offset.
1587 * @param iBit The bit offset from the byte offset.
1588 */
1589DECLINLINE(bool) hmR0VmxIsMsrBitSet(const void *pvMsrBitmap, uint16_t offMsr, int32_t iBit)
1590{
1591 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
1592 Assert(pbMsrBitmap);
1593    Assert(offMsr + (iBit >> 3) < X86_PAGE_4K_SIZE);
1594 return ASMBitTest(pbMsrBitmap + offMsr, iBit);
1595}
1596#endif
1597
1598
1599/**
1600 * Sets the permission bits for the specified MSR in the given MSR bitmap.
1601 *
1602 * If the passed VMCS is a nested-guest VMCS, this function ensures that the
1603 * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
1604 * VMX execution of the nested-guest only when the nested-guest itself is also
1605 * not intercepting the corresponding read/write access of this MSR.
1606 *
1607 * @param pVCpu The cross context virtual CPU structure.
1608 * @param pVmcsInfo The VMCS info. object.
1609 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1610 * @param idMsr The MSR value.
1611 * @param fMsrpm The MSR permissions (see VMXMSRPM_XXX). This must
1612 * include both a read -and- a write permission!
1613 *
1614 * @sa CPUMGetVmxMsrPermission.
1615 * @remarks Can be called with interrupts disabled.
1616 */
1617static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
1618{
1619 uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
1620 Assert(pbMsrBitmap);
1621 Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
1622
1623 /*
1624 * MSR-bitmap Layout:
1625 * Byte index MSR range Interpreted as
1626 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
1627 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
1628 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
1629 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
1630 *
1631 * A bit corresponding to an MSR within the above range causes a VM-exit
1632     * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
1633     * the above MSR ranges, it always causes a VM-exit.
1634 *
1635 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
1636 */
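    /*
     * Illustrative worked example (not part of the original source): for a high MSR
     * such as MSR_K8_LSTAR (0xc0000082), the code below yields offMsr = 0x400 and
     * iBit = 0x82, so the read intercept lives at byte 0x400 + (0x82 >> 3) = 0x410,
     * bit (0x82 & 7) = 2, and the write intercept at byte 0xc10, bit 2.
     */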
1637 uint16_t const offBitmapRead = 0;
1638 uint16_t const offBitmapWrite = 0x800;
1639 uint16_t offMsr;
1640 int32_t iBit;
1641 if (idMsr <= UINT32_C(0x00001fff))
1642 {
1643 offMsr = 0;
1644 iBit = idMsr;
1645 }
1646 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
1647 {
1648 offMsr = 0x400;
1649 iBit = idMsr - UINT32_C(0xc0000000);
1650 }
1651 else
1652 AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
1653
1654 /*
1655 * Set the MSR read permission.
1656 */
1657 uint16_t const offMsrRead = offBitmapRead + offMsr;
1658 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
1659 if (fMsrpm & VMXMSRPM_ALLOW_RD)
1660 {
1661#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1662 bool const fClear = !fIsNstGstVmcs ? true
1663 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrRead, iBit);
1664#else
1665 RT_NOREF2(pVCpu, fIsNstGstVmcs);
1666 bool const fClear = true;
1667#endif
1668 if (fClear)
1669 ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
1670 }
1671 else
1672 ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
1673
1674 /*
1675 * Set the MSR write permission.
1676 */
1677 uint16_t const offMsrWrite = offBitmapWrite + offMsr;
1678 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
1679 if (fMsrpm & VMXMSRPM_ALLOW_WR)
1680 {
1681#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1682 bool const fClear = !fIsNstGstVmcs ? true
1683 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrWrite, iBit);
1684#else
1685 RT_NOREF2(pVCpu, fIsNstGstVmcs);
1686 bool const fClear = true;
1687#endif
1688 if (fClear)
1689 ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
1690 }
1691 else
1692 ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
1693}
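
/*
 * Illustrative usage sketch (not part of the original source): granting the guest
 * passthru read/write access to an MSR in the guest VMCS bitmap. MSR_K8_TSC_AUX is
 * merely an example MSR here; real callers include hmR0VmxSetupVmcsMsrPermissions()
 * and the auto-load/store MSR code below.
 */
#if 0
    hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false /* fIsNstGstVmcs */, MSR_K8_TSC_AUX, VMXMSRPM_ALLOW_RD_WR);
#endif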
1694
1695
1696/**
1697 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1698 * area.
1699 *
1700 * @returns VBox status code.
1701 * @param pVCpu The cross context virtual CPU structure.
1702 * @param pVmcsInfo The VMCS info. object.
1703 * @param cMsrs The number of MSRs.
1704 */
1705static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
1706{
1707 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
1708 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1709 if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
1710 {
1711 /* Commit the MSR counts to the VMCS and update the cache. */
1712 if (pVmcsInfo->cEntryMsrLoad != cMsrs)
1713 {
1714 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
1715 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
1716 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
1717 AssertRCReturn(rc, rc);
1718
1719 pVmcsInfo->cEntryMsrLoad = cMsrs;
1720 pVmcsInfo->cExitMsrStore = cMsrs;
1721 pVmcsInfo->cExitMsrLoad = cMsrs;
1722 }
1723 return VINF_SUCCESS;
1724 }
1725
1726 LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
1727 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1728 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1729}
1730
1731
1732/**
1733 * Adds a new (or updates the value of an existing) guest/host MSR
1734 * pair to be swapped during the world-switch as part of the
1735 * auto-load/store MSR area in the VMCS.
1736 *
1737 * @returns VBox status code.
1738 * @param pVCpu The cross context virtual CPU structure.
1739 * @param pVmxTransient The VMX-transient structure.
1740 * @param idMsr The MSR.
1741 * @param uGuestMsrValue Value of the guest MSR.
1742 * @param fSetReadWrite Whether to set the guest read/write access of this
1743 * MSR (thus not causing a VM-exit).
1744 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1745 * necessary.
1746 */
1747static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
1748 bool fSetReadWrite, bool fUpdateHostMsr)
1749{
1750 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1751 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
1752 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1753 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
1754 uint32_t i;
1755
1756 /* Paranoia. */
1757 Assert(pGuestMsrLoad);
1758
1759    LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
1760
1761 /* Check if the MSR already exists in the VM-entry MSR-load area. */
1762 for (i = 0; i < cMsrs; i++)
1763 {
1764 if (pGuestMsrLoad[i].u32Msr == idMsr)
1765 break;
1766 }
1767
1768 bool fAdded = false;
1769 if (i == cMsrs)
1770 {
1771 /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
1772 ++cMsrs;
1773 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
1774 AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
1775
1776 /* Set the guest to read/write this MSR without causing VM-exits. */
1777 if ( fSetReadWrite
1778 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
1779 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
1780
1781 LogFlowFunc(("MSR added, cMsrs now %u\n", cMsrs));
1782 fAdded = true;
1783 }
1784
1785 /* Update the MSR value for the newly added or already existing MSR. */
1786 pGuestMsrLoad[i].u32Msr = idMsr;
1787 pGuestMsrLoad[i].u64Value = uGuestMsrValue;
1788
1789 /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
1790 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
1791 {
1792 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
1793 pGuestMsrStore[i].u32Msr = idMsr;
1794 pGuestMsrStore[i].u64Value = uGuestMsrValue;
1795 }
1796
1797 /* Update the corresponding slot in the host MSR area. */
1798 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1799 Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
1800 Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
1801 pHostMsr[i].u32Msr = idMsr;
1802
1803 /*
1804 * Only if the caller requests to update the host MSR value AND we've newly added the
1805 * MSR to the host MSR area do we actually update the value. Otherwise, it will be
1806 * updated by hmR0VmxUpdateAutoLoadHostMsrs().
1807 *
1808 * We do this for performance reasons since reading MSRs may be quite expensive.
1809 */
1810 if (fAdded)
1811 {
1812 if (fUpdateHostMsr)
1813 {
1814 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1815 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1816 pHostMsr[i].u64Value = ASMRdMsr(idMsr);
1817 }
1818 else
1819 {
1820 /* Someone else can do the work. */
1821 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
1822 }
1823 }
1824 return VINF_SUCCESS;
1825}
1826
1827
1828/**
1829 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1830 * auto-load/store MSR area in the VMCS.
1831 *
1832 * @returns VBox status code.
1833 * @param pVCpu The cross context virtual CPU structure.
1834 * @param pVmxTransient The VMX-transient structure.
1835 * @param idMsr The MSR.
1836 */
1837static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr)
1838{
1839 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1840 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
1841 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1842 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
1843
1844 LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
1845
1846 for (uint32_t i = 0; i < cMsrs; i++)
1847 {
1848 /* Find the MSR. */
1849 if (pGuestMsrLoad[i].u32Msr == idMsr)
1850 {
1851 /*
1852 * If it's the last MSR, we only need to reduce the MSR count.
1853 * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
1854 */
1855 if (i < cMsrs - 1)
1856 {
1857 /* Remove it from the VM-entry MSR-load area. */
1858 pGuestMsrLoad[i].u32Msr = pGuestMsrLoad[cMsrs - 1].u32Msr;
1859 pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
1860
1861 /* Remove it from the VM-exit MSR-store area if it's in a different page. */
1862 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
1863 {
1864 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
1865 Assert(pGuestMsrStore[i].u32Msr == idMsr);
1866 pGuestMsrStore[i].u32Msr = pGuestMsrStore[cMsrs - 1].u32Msr;
1867 pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
1868 }
1869
1870 /* Remove it from the VM-exit MSR-load area. */
1871 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1872 Assert(pHostMsr[i].u32Msr == idMsr);
1873 pHostMsr[i].u32Msr = pHostMsr[cMsrs - 1].u32Msr;
1874 pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
1875 }
1876
1877 /* Reduce the count to reflect the removed MSR and bail. */
1878 --cMsrs;
1879 break;
1880 }
1881 }
1882
1883 /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
1884 if (cMsrs != pVmcsInfo->cEntryMsrLoad)
1885 {
1886 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
1887 AssertRCReturn(rc, rc);
1888
1889 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1890 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1891 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
1892
1893 Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
1894 return VINF_SUCCESS;
1895 }
1896
1897 return VERR_NOT_FOUND;
1898}
1899
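
/*
 * Illustrative usage sketch (not part of the original source): how a caller might
 * temporarily swap a guest/host MSR pair across the world-switch using the two
 * helpers above. The wrapper name and the uGuestTscAux parameter are made up for
 * this example; MSR_K8_TSC_AUX is merely a plausible MSR to swap.
 */
#if 0
static int hmR0VmxExampleSwapTscAux(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint64_t uGuestTscAux)
{
    /* Add (or update) the MSR in the auto-load/store area and allow passthru access. */
    int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, uGuestTscAux,
                                        true /* fSetReadWrite */, false /* fUpdateHostMsr */);
    AssertRCReturn(rc, rc);

    /* ... VM-entry/VM-exit happens here; the CPU swaps the guest and host values ... */

    /* Stop swapping the MSR; guest accesses are intercepted again. */
    return hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);
}
#endif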
1900
1901/**
1902 * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
1903 *
1904 * @returns @c true if found, @c false otherwise.
1905 * @param pVmcsInfo The VMCS info. object.
1906 * @param idMsr The MSR to find.
1907 */
1908static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
1909{
1910 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1911 uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
1912 Assert(pMsrs);
1913 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
1914 for (uint32_t i = 0; i < cMsrs; i++)
1915 {
1916 if (pMsrs[i].u32Msr == idMsr)
1917 return true;
1918 }
1919 return false;
1920}
1921
1922
1923/**
1924 * Updates the value of all host MSRs in the VM-exit MSR-load area.
1925 *
1926 * @param pVCpu The cross context virtual CPU structure.
1927 * @param pVmcsInfo The VMCS info. object.
1928 *
1929 * @remarks No-long-jump zone!!!
1930 */
1931static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
1932{
1933 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1934
1935 PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1936 uint32_t const cMsrs = pVmcsInfo->cExitMsrLoad;
1937 Assert(pHostMsrLoad);
1938 Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
1939 LogFlowFunc(("pVCpu=%p cMsrs=%u\n", pVCpu, cMsrs));
1940 for (uint32_t i = 0; i < cMsrs; i++)
1941 {
1942 /*
1943 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1944 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1945 */
1946 if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
1947 pHostMsrLoad[i].u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
1948 else
1949 pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
1950 }
1951}
1952
1953
1954/**
1955 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1956 * perform lazy restoration of the host MSRs while leaving VT-x.
1957 *
1958 * @param pVCpu The cross context virtual CPU structure.
1959 *
1960 * @remarks No-long-jump zone!!!
1961 */
1962static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1963{
1964 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1965
1966 /*
1967 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls().
1968 */
1969 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
1970 {
1971 Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. */
1972#if HC_ARCH_BITS == 64
1973 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1974 {
1975 pVCpu->hm.s.vmx.u64HostMsrLStar = ASMRdMsr(MSR_K8_LSTAR);
1976 pVCpu->hm.s.vmx.u64HostMsrStar = ASMRdMsr(MSR_K6_STAR);
1977 pVCpu->hm.s.vmx.u64HostMsrSfMask = ASMRdMsr(MSR_K8_SF_MASK);
1978 pVCpu->hm.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1979 }
1980#endif
1981 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1982 }
1983}
1984
1985
1986/**
1987 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1988 * lazily while leaving VT-x.
1989 *
1990 * @returns true if it does, false otherwise.
1991 * @param pVCpu The cross context virtual CPU structure.
1992 * @param idMsr The MSR to check.
1993 */
1994static bool hmR0VmxIsLazyGuestMsr(PCVMCPU pVCpu, uint32_t idMsr)
1995{
1996 NOREF(pVCpu);
1997#if HC_ARCH_BITS == 64
1998 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1999 {
2000 switch (idMsr)
2001 {
2002 case MSR_K8_LSTAR:
2003 case MSR_K6_STAR:
2004 case MSR_K8_SF_MASK:
2005 case MSR_K8_KERNEL_GS_BASE:
2006 return true;
2007 }
2008 }
2009#else
2010 RT_NOREF(pVCpu, idMsr);
2011#endif
2012 return false;
2013}
2014
2015
2016/**
2017 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
2018 *
2019 * The name of this function is slightly confusing. This function does NOT
2020 * postpone loading, but loads the MSRs right now. "hmR0VmxLazy" is simply a
2021 * common prefix for functions dealing with "lazy restoration" of the shared
2022 * MSRs.
2023 *
2024 * @param pVCpu The cross context virtual CPU structure.
2025 *
2026 * @remarks No-long-jump zone!!!
2027 */
2028static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu)
2029{
2030 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2031 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2032
2033 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
2034#if HC_ARCH_BITS == 64
2035 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
2036 {
2037 /*
2038 * If the guest MSRs are not loaded -and- if all the guest MSRs are identical
2039 * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then
2040 * we can skip a few MSR writes.
2041 *
2042 * Otherwise, it implies either 1. they're not loaded, or 2. they're loaded but the
2043 * guest MSR values in the guest-CPU context might be different to what's currently
2044 * loaded in the CPU. In either case, we need to write the new guest MSR values to the
2045 * CPU, see @bugref{8728}.
2046 */
2047 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2048 if ( !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
2049 && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostMsrKernelGsBase
2050 && pCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostMsrLStar
2051 && pCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostMsrStar
2052 && pCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostMsrSfMask)
2053 {
2054#ifdef VBOX_STRICT
2055 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE);
2056 Assert(ASMRdMsr(MSR_K8_LSTAR) == pCtx->msrLSTAR);
2057 Assert(ASMRdMsr(MSR_K6_STAR) == pCtx->msrSTAR);
2058 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pCtx->msrSFMASK);
2059#endif
2060 }
2061 else
2062 {
2063 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
2064 ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR);
2065 ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR);
2066 ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK);
2067 }
2068 }
2069#endif
2070 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
2071}
2072
2073
2074/**
2075 * Performs lazy restoration of the set of host MSRs if they were previously
2076 * loaded with guest MSR values.
2077 *
2078 * @param pVCpu The cross context virtual CPU structure.
2079 *
2080 * @remarks No-long-jump zone!!!
2081 * @remarks The guest MSRs should have been saved back into the guest-CPU
2082 * context by hmR0VmxImportGuestState()!!!
2083 */
2084static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
2085{
2086 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2087 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2088
2089 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
2090 {
2091 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
2092#if HC_ARCH_BITS == 64
2093 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
2094 {
2095 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostMsrLStar);
2096 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostMsrStar);
2097 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostMsrSfMask);
2098 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostMsrKernelGsBase);
2099 }
2100#endif
2101 }
2102 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
2103}
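
/*
 * Illustrative call-order sketch (not part of the original source): the lazy MSR
 * helpers above are intended to bracket guest execution roughly as follows, with
 * preemption disabled and ring-3 calls blocked. hmR0VmxImportGuestState() must have
 * written the guest MSR values back into the guest-CPU context before the restore.
 */
#if 0
    hmR0VmxLazySaveHostMsrs(pVCpu);     /* Cache the host LSTAR/STAR/SF_MASK/KERNEL_GS_BASE values. */
    hmR0VmxLazyLoadGuestMsrs(pVCpu);    /* Load the guest values into the CPU (skipped if already identical). */
    /* ... VM-entry, guest execution, VM-exit ... */
    hmR0VmxLazyRestoreHostMsrs(pVCpu);  /* Put the host values back while leaving VT-x. */
#endif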
2104
2105
2106/**
2107 * Verifies that our cached values of the VMCS fields are all consistent with
2108 * what's actually present in the VMCS.
2109 *
2110 * @returns VBox status code.
2111 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
2112 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
2113 * VMCS content. HMCPU error-field is
2114 * updated, see VMX_VCI_XXX.
2115 * @param pVCpu The cross context virtual CPU structure.
2116 * @param pVmcsInfo The VMCS info. object.
2117 */
2118static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2119{
2120 uint32_t u32Val;
2121 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
2122 AssertRCReturn(rc, rc);
2123 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
2124 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32EntryCtls, u32Val),
2125 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
2126 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2127
2128 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
2129 AssertRCReturn(rc, rc);
2130 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
2131 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ExitCtls, u32Val),
2132 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
2133 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2134
2135 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
2136 AssertRCReturn(rc, rc);
2137 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
2138 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32PinCtls, u32Val),
2139 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
2140 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2141
2142 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
2143 AssertRCReturn(rc, rc);
2144 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
2145 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls, u32Val),
2146 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
2147 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2148
2149 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2150 {
2151 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
2152 AssertRCReturn(rc, rc);
2153 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
2154 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls2, u32Val),
2155 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
2156 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2157 }
2158
2159 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
2160 AssertRCReturn(rc, rc);
2161 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
2162 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32XcptBitmap, u32Val),
2163 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
2164 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2165
2166 uint64_t u64Val;
2167 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
2168 AssertRCReturn(rc, rc);
2169 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
2170 ("Cache=%#RX64 VMCS=%#RX64\n", pVmcsInfo->u64TscOffset, u64Val),
2171 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
2172 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2173
2174 return VINF_SUCCESS;
2175}
2176
2177
2178#ifdef VBOX_STRICT
2179/**
2180 * Verifies that our cached host EFER MSR value has not changed since we cached it.
2181 *
2182 * @param pVCpu The cross context virtual CPU structure.
2183 * @param pVmcsInfo The VMCS info. object.
2184 */
2185static void hmR0VmxCheckHostEferMsr(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2186{
2187 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2188
2189 if (pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2190 {
2191 uint64_t const uHostEferMsr = ASMRdMsr(MSR_K6_EFER);
2192 uint64_t const uHostEferMsrCache = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
2193 uint64_t uVmcsEferMsrVmcs;
2194 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &uVmcsEferMsrVmcs);
2195 AssertRC(rc);
2196
2197 AssertMsgReturnVoid(uHostEferMsr == uVmcsEferMsrVmcs,
2198 ("EFER Host/VMCS mismatch! host=%#RX64 vmcs=%#RX64\n", uHostEferMsr, uVmcsEferMsrVmcs));
2199 AssertMsgReturnVoid(uHostEferMsr == uHostEferMsrCache,
2200 ("EFER Host/Cache mismatch! host=%#RX64 cache=%#RX64\n", uHostEferMsr, uHostEferMsrCache));
2201 }
2202}
2203
2204
2205/**
2206 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
2207 * VMCS are correct.
2208 *
2209 * @param pVCpu The cross context virtual CPU structure.
2210 * @param pVmcsInfo The VMCS info. object.
2211 */
2212static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2213{
2214 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2215
2216 /* Read the various MSR-area counts from the VMCS. */
2217 uint32_t cEntryLoadMsrs;
2218 uint32_t cExitStoreMsrs;
2219 uint32_t cExitLoadMsrs;
2220 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cEntryLoadMsrs); AssertRC(rc);
2221 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cExitStoreMsrs); AssertRC(rc);
2222 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cExitLoadMsrs); AssertRC(rc);
2223
2224 /* Verify all the MSR counts are the same. */
2225 Assert(cEntryLoadMsrs == cExitStoreMsrs);
2226 Assert(cExitStoreMsrs == cExitLoadMsrs);
2227 uint32_t const cMsrs = cExitLoadMsrs;
2228
2229 /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */
2230 Assert(cMsrs < VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc));
2231
2232 /* Verify the MSR counts are within the allocated page size. */
2233 Assert(sizeof(VMXAUTOMSR) * cMsrs <= X86_PAGE_4K_SIZE);
2234
2235 /* Verify the relevant contents of the MSR areas match. */
2236 PCVMXAUTOMSR pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
2237 PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
2238 PCVMXAUTOMSR pHostMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
2239 bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
2240 for (uint32_t i = 0; i < cMsrs; i++)
2241 {
2242 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
2243 if (fSeparateExitMsrStorePage)
2244 {
2245 AssertMsgReturnVoid(pGuestMsrLoad->u32Msr == pGuestMsrStore->u32Msr,
2246 ("GuestMsrLoad=%#RX32 GuestMsrStore=%#RX32 cMsrs=%u\n",
2247 pGuestMsrLoad->u32Msr, pGuestMsrStore->u32Msr, cMsrs));
2248 }
2249
2250 AssertMsgReturnVoid(pHostMsrLoad->u32Msr == pGuestMsrLoad->u32Msr,
2251 ("HostMsrLoad=%#RX32 GuestMsrLoad=%#RX32 cMsrs=%u\n",
2252 pHostMsrLoad->u32Msr, pGuestMsrLoad->u32Msr, cMsrs));
2253
2254 uint64_t const u64Msr = ASMRdMsr(pHostMsrLoad->u32Msr);
2255 AssertMsgReturnVoid(pHostMsrLoad->u64Value == u64Msr,
2256 ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
2257 pHostMsrLoad->u32Msr, pHostMsrLoad->u64Value, u64Msr, cMsrs));
2258
2259        /* Verify that the cached host EFER MSR matches what's loaded on the CPU. */
2260 bool const fIsEferMsr = RT_BOOL(pHostMsrLoad->u32Msr == MSR_K6_EFER);
2261 if (fIsEferMsr)
2262 {
2263 AssertMsgReturnVoid(u64Msr == pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer,
2264 ("Cached=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
2265 pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer, u64Msr, cMsrs));
2266 }
2267
2268 /* Verify that the accesses are as expected in the MSR bitmap for auto-load/store MSRs. */
2269 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2270 {
2271 uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
2272 if (fIsEferMsr)
2273 {
2274 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_RD), ("Passthru read for EFER MSR!?\n"));
2275 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_WR), ("Passthru write for EFER MSR!?\n"));
2276 }
2277 else
2278 {
2279 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_ALLOW_RD_WR) == VMXMSRPM_ALLOW_RD_WR,
2280 ("u32Msr=%#RX32 cMsrs=%u No passthru read/write!\n", pGuestMsrLoad->u32Msr, cMsrs));
2281 }
2282 }
2283
2284 /* Move to the next MSR. */
2285 pHostMsrLoad++;
2286 pGuestMsrLoad++;
2287 pGuestMsrStore++;
2288 }
2289}
2290#endif /* VBOX_STRICT */
2291
2292
2293/**
2294 * Flushes the TLB using EPT.
2295 *
2297 * @param pVCpu The cross context virtual CPU structure of the calling
2298 * EMT. Can be NULL depending on @a enmTlbFlush.
2299 * @param pVmcsInfo The VMCS info. object. Can be NULL depending on @a
2300 * enmTlbFlush.
2301 * @param enmTlbFlush Type of flush.
2302 *
2303 * @remarks Caller is responsible for making sure this function is called only
2304 * when NestedPaging is supported and providing @a enmTlbFlush that is
2305 * supported by the CPU.
2306 * @remarks Can be called with interrupts disabled.
2307 */
2308static void hmR0VmxFlushEpt(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, VMXTLBFLUSHEPT enmTlbFlush)
2309{
2310 uint64_t au64Descriptor[2];
2311 if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS)
2312 au64Descriptor[0] = 0;
2313 else
2314 {
2315 Assert(pVCpu);
2316 Assert(pVmcsInfo);
2317 au64Descriptor[0] = pVmcsInfo->HCPhysEPTP;
2318 }
2319 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
2320
2321 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
2322 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %#RHp failed. rc=%Rrc\n", enmTlbFlush, au64Descriptor[0], rc));
2323
2324 if ( RT_SUCCESS(rc)
2325 && pVCpu)
2326 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
2327}
2328
2329
2330/**
2331 * Flushes the TLB using VPID.
2332 *
2334 * @param pVCpu The cross context virtual CPU structure of the calling
2335 * EMT. Can be NULL depending on @a enmTlbFlush.
2336 * @param enmTlbFlush Type of flush.
2337 * @param GCPtr Virtual address of the page to flush (can be 0 depending
2338 * on @a enmTlbFlush).
2339 *
2340 * @remarks Can be called with interrupts disabled.
2341 */
2342static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr)
2343{
2344 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid);
2345
2346 uint64_t au64Descriptor[2];
2347 if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2348 {
2349 au64Descriptor[0] = 0;
2350 au64Descriptor[1] = 0;
2351 }
2352 else
2353 {
2354 AssertPtr(pVCpu);
2355 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
2356 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
2357 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
2358 au64Descriptor[1] = GCPtr;
2359 }
2360
2361 int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
2362 AssertMsg(rc == VINF_SUCCESS,
2363 ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
2364
2365 if ( RT_SUCCESS(rc)
2366 && pVCpu)
2367 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
2368 NOREF(rc);
2369}
2370
2371
2372/**
2373 * Invalidates a guest page by guest virtual address. Only relevant for EPT/VPID,
2374 * otherwise there is nothing really to invalidate.
2375 *
2376 * @returns VBox status code.
2377 * @param pVCpu The cross context virtual CPU structure.
2378 * @param GCVirt Guest virtual address of the page to invalidate.
2379 */
2380VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
2381{
2382 AssertPtr(pVCpu);
2383 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
2384
2385 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
2386 {
2387 /*
2388         * We must invalidate the guest TLB entry in either case; we cannot ignore it even
2389         * for the EPT case. See @bugref{6043} and @bugref{6177}.
2390         *
2391         * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTaggedTlb*()
2392         * as this function may be called in a loop with individual addresses.
2393 */
2394 PVM pVM = pVCpu->CTX_SUFF(pVM);
2395 if (pVM->hm.s.vmx.fVpid)
2396 {
2397 bool fVpidFlush = RT_BOOL(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
2398
2399#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2400 /*
2401             * Work around errata BV75, AAJ159 and others that affect several Intel CPUs
2402 * where executing INVVPID outside 64-bit mode does not flush translations of
2403 * 64-bit linear addresses, see @bugref{6208#c72}.
2404 */
2405 if (RT_HI_U32(GCVirt))
2406 fVpidFlush = false;
2407#endif
2408
2409 if (fVpidFlush)
2410 {
2411 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_INDIV_ADDR, GCVirt);
2412 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
2413 }
2414 else
2415 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2416 }
2417 else if (pVM->hm.s.fNestedPaging)
2418 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2419 }
2420
2421 return VINF_SUCCESS;
2422}
2423
2424
2425/**
2426 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
2427 * case where neither EPT nor VPID is supported by the CPU.
2428 *
2429 * @param pHostCpu The HM physical-CPU structure.
2430 * @param pVCpu The cross context virtual CPU structure.
2431 *
2432 * @remarks Called with interrupts disabled.
2433 */
2434static void hmR0VmxFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
2435{
2436 AssertPtr(pVCpu);
2437 AssertPtr(pHostCpu);
2438
2439 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
2440
2441 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2442 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2443 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2444 pVCpu->hm.s.fForceTLBFlush = false;
2445 return;
2446}
2447
2448
2449/**
2450 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
2451 *
2452 * @param pHostCpu The HM physical-CPU structure.
2453 * @param pVCpu The cross context virtual CPU structure.
2454 * @param pVmcsInfo The VMCS info. object.
2455 *
2456 * @remarks All references to "ASID" in this function pertain to "VPID" in Intel's
2457 *          nomenclature. The reason is to avoid confusion in comparisons, since
2458 *          the host-CPU copies are named "ASID".
2459 *
2460 * @remarks Called with interrupts disabled.
2461 */
2462static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2463{
2464#ifdef VBOX_WITH_STATISTICS
2465 bool fTlbFlushed = false;
2466# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
2467# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
2468 if (!fTlbFlushed) \
2469 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
2470 } while (0)
2471#else
2472# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
2473# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
2474#endif
2475
2476 AssertPtr(pVCpu);
2477 AssertPtr(pHostCpu);
2478 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2479
2480 PVM pVM = pVCpu->CTX_SUFF(pVM);
2481 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
2482 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
2483 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
2484
2485 /*
2486 * Force a TLB flush for the first world-switch if the current CPU differs from the one we
2487 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
2488 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
2489 * cannot reuse the current ASID anymore.
2490 */
2491 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2492 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2493 {
2494 ++pHostCpu->uCurrentAsid;
2495 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2496 {
2497 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
2498 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2499 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2500 }
2501
2502 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
2503 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2504 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2505
2506 /*
2507 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
2508 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
2509 */
2510 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt);
2511 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2512 HMVMX_SET_TAGGED_TLB_FLUSHED();
2513 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
2514 }
2515 else if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) /* Check for explicit TLB flushes. */
2516 {
2517 /*
2518         * Changes to the EPT paging structure by the VMM require flushing-by-EPT as the CPU
2519         * creates guest-physical (i.e. only EPT-tagged) mappings while traversing the EPT
2520 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
2521 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
2522 * mappings, see @bugref{6568}.
2523 *
2524 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
2525 */
2526 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt);
2527 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2528 HMVMX_SET_TAGGED_TLB_FLUSHED();
2529 }
2530
2531 pVCpu->hm.s.fForceTLBFlush = false;
2532 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
2533
2534 Assert(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu);
2535 Assert(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes);
2536 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
2537 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
2538 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2539 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
2540 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2541 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2542 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2543
2544 /* Update VMCS with the VPID. */
2545 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2546 AssertRC(rc);
2547
2548#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2549}
2550
2551
2552/**
2553 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2554 *
2555 * @param pHostCpu The HM physical-CPU structure.
2556 * @param pVCpu The cross context virtual CPU structure.
2557 * @param pVmcsInfo The VMCS info. object.
2558 *
2559 * @remarks Called with interrupts disabled.
2560 */
2561static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2562{
2563 AssertPtr(pVCpu);
2564 AssertPtr(pHostCpu);
2565 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2566 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
2567 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
2568
2569 /*
2570 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2571 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2572 */
2573 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2574 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2575 {
2576 pVCpu->hm.s.fForceTLBFlush = true;
2577 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2578 }
2579
2580 /* Check for explicit TLB flushes. */
2581 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2582 {
2583 pVCpu->hm.s.fForceTLBFlush = true;
2584 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2585 }
2586
2587 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2588 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2589
2590 if (pVCpu->hm.s.fForceTLBFlush)
2591 {
2592 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
2593 pVCpu->hm.s.fForceTLBFlush = false;
2594 }
2595}
2596
2597
2598/**
2599 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2600 *
2601 * @param pHostCpu The HM physical-CPU structure.
2602 * @param pVCpu The cross context virtual CPU structure.
2603 *
2604 * @remarks Called with interrupts disabled.
2605 */
2606static void hmR0VmxFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
2607{
2608 AssertPtr(pVCpu);
2609 AssertPtr(pHostCpu);
2610 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2611    AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked without VPID."));
2612    AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbVpid cannot be invoked with NestedPaging"));
2613
2614 /*
2615 * Force a TLB flush for the first world switch if the current CPU differs from the one we
2616 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
2617 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
2618 * cannot reuse the current ASID anymore.
2619 */
2620 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2621 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2622 {
2623 pVCpu->hm.s.fForceTLBFlush = true;
2624 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2625 }
2626
2627 /* Check for explicit TLB flushes. */
2628 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2629 {
2630 /*
2631 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
2632 * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
2633 * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to
2634         * include fExplicitFlush too) - an obscure corner case.
2635 */
2636 pVCpu->hm.s.fForceTLBFlush = true;
2637 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2638 }
2639
2640 PVM pVM = pVCpu->CTX_SUFF(pVM);
2641 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2642 if (pVCpu->hm.s.fForceTLBFlush)
2643 {
2644 ++pHostCpu->uCurrentAsid;
2645 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2646 {
2647 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2648 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2649 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2650 }
2651
2652 pVCpu->hm.s.fForceTLBFlush = false;
2653 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2654 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
2655 if (pHostCpu->fFlushAsidBeforeUse)
2656 {
2657 if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
2658 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2659 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2660 {
2661 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2662 pHostCpu->fFlushAsidBeforeUse = false;
2663 }
2664 else
2665 {
2666 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2667 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2668 }
2669 }
2670 }
2671
2672 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
2673 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
2674 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2675 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
2676 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2677 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2678 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2679
2680 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2681 AssertRC(rc);
2682}
2683
2684
2685/**
2686 * Flushes the guest TLB entries based on CPU capabilities.
2687 *
2688 * @param pHostCpu The HM physical-CPU structure.
2689 * @param pVCpu The cross context virtual CPU structure.
2690 * @param pVmcsInfo The VMCS info. object.
2691 *
2692 * @remarks Called with interrupts disabled.
2693 */
2694static void hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2695{
2696#ifdef HMVMX_ALWAYS_FLUSH_TLB
2697 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2698#endif
2699 PVM pVM = pVCpu->CTX_SUFF(pVM);
2700 switch (pVM->hm.s.vmx.enmTlbFlushType)
2701 {
2702 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu, pVmcsInfo); break;
2703 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu, pVmcsInfo); break;
2704 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;
2705 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;
2706 default:
2707 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2708 break;
2709 }
2710 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2711}
2712
2713
2714/**
2715 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2716 * TLB entries from the host TLB before VM-entry.
2717 *
2718 * @returns VBox status code.
2719 * @param pVM The cross context VM structure.
2720 */
2721static int hmR0VmxSetupTaggedTlb(PVM pVM)
2722{
2723 /*
2724 * Determine optimal flush type for nested paging.
2725     * We cannot ignore EPT if no suitable flush type is supported by the CPU as we've already set up
2726     * unrestricted guest execution (see hmR3InitFinalizeR0()).
2727 */
2728 if (pVM->hm.s.fNestedPaging)
2729 {
2730 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2731 {
2732 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2733 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT;
2734 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2735 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS;
2736 else
2737 {
2738 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
2739 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2740 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
2741 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2742 }
2743
2744 /* Make sure the write-back cacheable memory type for EPT is supported. */
2745 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)))
2746 {
2747 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2748 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
2749 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2750 }
2751
2752 /* EPT requires a page-walk length of 4. */
2753 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
2754 {
2755 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2756 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
2757 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2758 }
2759 }
2760 else
2761 {
2762 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2763 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2764 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
2765 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2766 }
2767 }
2768
2769 /*
2770 * Determine optimal flush type for VPID.
2771 */
2772 if (pVM->hm.s.vmx.fVpid)
2773 {
2774 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2775 {
2776 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2777 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT;
2778 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2779 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS;
2780 else
2781 {
2782                /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2783 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2784 LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
2785 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2786 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2787 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2788 pVM->hm.s.vmx.fVpid = false;
2789 }
2790 }
2791 else
2792 {
2793 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2794            Log4Func(("VPID supported without INVVPID support. Ignoring VPID.\n"));
2795 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2796 pVM->hm.s.vmx.fVpid = false;
2797 }
2798 }
2799
2800 /*
2801 * Setup the handler for flushing tagged-TLBs.
2802 */
2803 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2804 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID;
2805 else if (pVM->hm.s.fNestedPaging)
2806 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT;
2807 else if (pVM->hm.s.vmx.fVpid)
2808 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID;
2809 else
2810 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE;
2811 return VINF_SUCCESS;
2812}
2813
2814
2815/**
2816 * Sets up the virtual-APIC page address for the VMCS.
2817 *
2818 * @returns VBox status code.
2819 * @param pVCpu The cross context virtual CPU structure.
2820 * @param pVmcsInfo The VMCS info. object.
2821 */
2822DECLINLINE(int) hmR0VmxSetupVmcsVirtApicAddr(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2823{
2824 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2825 RTHCPHYS const HCPhysVirtApic = pVmcsInfo->HCPhysVirtApic;
2826 Assert(HCPhysVirtApic != NIL_RTHCPHYS);
2827 Assert(!(HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2828 return VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
2829}
2830
2831
2832/**
2833 * Sets up the MSR-bitmap address for the VMCS.
2834 *
2835 * @returns VBox status code.
2836 * @param pVCpu The cross context virtual CPU structure.
2837 * @param pVmcsInfo The VMCS info. object.
2838 */
2839DECLINLINE(int) hmR0VmxSetupVmcsMsrBitmapAddr(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2840{
2841 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2842 RTHCPHYS const HCPhysMsrBitmap = pVmcsInfo->HCPhysMsrBitmap;
2843 Assert(HCPhysMsrBitmap != NIL_RTHCPHYS);
2844 Assert(!(HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2845 return VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, HCPhysMsrBitmap);
2846}
2847
2848
2849/**
2850 * Sets up the APIC-access page address for the VMCS.
2851 *
2852 * @returns VBox status code.
2853 * @param pVCpu The cross context virtual CPU structure.
2854 */
2855DECLINLINE(int) hmR0VmxSetupVmcsApicAccessAddr(PVMCPU pVCpu)
2856{
2857 RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysApicAccess;
2858 Assert(HCPhysApicAccess != NIL_RTHCPHYS);
2859 Assert(!(HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2860 return VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
2861}
2862
2863
2864/**
2865 * Sets up the VMCS link pointer for the VMCS.
2866 *
2867 * @returns VBox status code.
2868 * @param pVCpu The cross context virtual CPU structure.
2869 * @param pVmcsInfo The VMCS info. object.
2870 */
2871DECLINLINE(int) hmR0VmxSetupVmcsLinkPtr(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2872{
2873 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2874 uint64_t const u64VmcsLinkPtr = pVmcsInfo->u64VmcsLinkPtr;
2875 Assert(u64VmcsLinkPtr == UINT64_C(0xffffffffffffffff)); /* Bits 63:0 MB1. */
2876 return VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, u64VmcsLinkPtr);
2877}
2878
2879
2880/**
2881 * Sets up the VM-entry MSR load, VM-exit MSR-store and VM-exit MSR-load addresses
2882 * in the VMCS.
2883 *
2884 * @returns VBox status code.
2885 * @param pVCpu The cross context virtual CPU structure.
2886 * @param pVmcsInfo The VMCS info. object.
2887 */
2888DECLINLINE(int) hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2889{
2890 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2891
2892 RTHCPHYS const HCPhysGuestMsrLoad = pVmcsInfo->HCPhysGuestMsrLoad;
2893 Assert(HCPhysGuestMsrLoad != NIL_RTHCPHYS);
2894 Assert(!(HCPhysGuestMsrLoad & 0xf)); /* Bits 3:0 MBZ. */
2895
2896 RTHCPHYS const HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrStore;
2897 Assert(HCPhysGuestMsrStore != NIL_RTHCPHYS);
2898 Assert(!(HCPhysGuestMsrStore & 0xf)); /* Bits 3:0 MBZ. */
2899
2900 RTHCPHYS const HCPhysHostMsrLoad = pVmcsInfo->HCPhysHostMsrLoad;
2901 Assert(HCPhysHostMsrLoad != NIL_RTHCPHYS);
2902 Assert(!(HCPhysHostMsrLoad & 0xf)); /* Bits 3:0 MBZ. */
2903
2904 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad);
2905 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore);
2906 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad);
2907 AssertRCReturn(rc, rc);
2908 return VINF_SUCCESS;
2909}
2910
2911
2912/**
2913 * Sets up MSR permissions in the MSR bitmap of a VMCS info. object.
2914 *
2915 * @param pVCpu The cross context virtual CPU structure.
2916 * @param pVmcsInfo The VMCS info. object.
2917 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
2918 */
2919static void hmR0VmxSetupVmcsMsrPermissions(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
2920{
2921 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
2922
2923 /*
2924 * The guest can access the following MSRs (read, write) without causing
2925 * VM-exits; they are loaded/stored automatically using fields in the VMCS.
2926 */
2927 PVM pVM = pVCpu->CTX_SUFF(pVM);
2928 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_CS, VMXMSRPM_ALLOW_RD_WR);
2929 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD_WR);
2930 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD_WR);
2931 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
2932 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_FS_BASE, VMXMSRPM_ALLOW_RD_WR);
2933
2934 /*
2935     * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
2936     * associated with them. We never need to intercept access (writes need to be
2937     * executed without causing a VM-exit, reads will #GP fault anyway).
2938     *
2939     * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
2940     * read/write it. We swap the guest/host MSR value using the
2941 * auto-load/store MSR area.
2942 */
2943 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2944 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_PRED_CMD, VMXMSRPM_ALLOW_RD_WR);
2945 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
2946 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
2947 if (pVM->cpum.ro.GuestFeatures.fIbrs)
2948 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);
2949
2950#if HC_ARCH_BITS == 64
2951 /*
2952 * Allow full read/write access for the following MSRs (mandatory for VT-x)
2953 * required for 64-bit guests.
2954 */
2955 if (pVM->hm.s.fAllow64BitGuests)
2956 {
2957 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_LSTAR, VMXMSRPM_ALLOW_RD_WR);
2958 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K6_STAR, VMXMSRPM_ALLOW_RD_WR);
2959 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_SF_MASK, VMXMSRPM_ALLOW_RD_WR);
2960 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
2961 }
2962#endif
2963
2964 /*
2965 * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}.
2966 */
2967#ifdef VBOX_STRICT
2968 Assert(pVmcsInfo->pvMsrBitmap);
2969 uint32_t const fMsrpmEfer = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K6_EFER);
2970 Assert(fMsrpmEfer == VMXMSRPM_EXIT_RD_WR);
2971#endif
2972}
2973
2974
2975/**
2976 * Sets up pin-based VM-execution controls in the VMCS.
2977 *
2978 * @returns VBox status code.
2979 * @param pVCpu The cross context virtual CPU structure.
2980 * @param pVmcsInfo The VMCS info. object.
2981 */
2982static int hmR0VmxSetupVmcsPinCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2983{
2984 PVM pVM = pVCpu->CTX_SUFF(pVM);
2985 uint32_t fVal = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed0; /* Bits set here must always be set. */
2986 uint32_t const fZap = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
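    /* Reminder on how the capability MSR is used here (per the Intel spec): allowed0 holds
       the bits that must be 1 in this control, allowed1 the bits that may be 1. We start
       from allowed0, OR in what we want, and the "(fVal & fZap) != fVal" check below flags
       any wanted bit the CPU does not permit (e.g. a preemption-timer bit missing from
       allowed1 would fail the check). */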
2987
2988 fVal |= VMX_PIN_CTLS_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2989 | VMX_PIN_CTLS_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2990
2991 if (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
2992 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2993
2994 /* Enable the VMX-preemption timer. */
2995 if (pVM->hm.s.vmx.fUsePreemptTimer)
2996 {
2997 Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
2998 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
2999 }
3000
3001#if 0
3002 /* Enable posted-interrupt processing. */
3003 if (pVM->hm.s.fPostedIntrs)
3004 {
3005 Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
3006 Assert(pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
3007 fVal |= VMX_PIN_CTLS_POSTED_INT;
3008 }
3009#endif
3010
3011 if ((fVal & fZap) != fVal)
3012 {
3013 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3014 pVM->hm.s.vmx.Msrs.PinCtls.n.allowed0, fVal, fZap));
3015 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
3016 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3017 }
3018
3019 /* Commit it to the VMCS and update our cache. */
3020 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
3021 AssertRCReturn(rc, rc);
3022 pVmcsInfo->u32PinCtls = fVal;
3023
3024 return VINF_SUCCESS;
3025}
3026
3027
3028/**
3029 * Sets up secondary processor-based VM-execution controls in the VMCS.
3030 *
3031 * @returns VBox status code.
3032 * @param pVCpu The cross context virtual CPU structure.
3033 * @param pVmcsInfo The VMCS info. object.
3034 */
3035static int hmR0VmxSetupVmcsProcCtls2(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3036{
3037 PVM pVM = pVCpu->CTX_SUFF(pVM);
3038 uint32_t fVal = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
3039 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3040
3041 /* WBINVD causes a VM-exit. */
3042 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
3043 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
3044
3045 /* Enable EPT (aka nested-paging). */
3046 if (pVM->hm.s.fNestedPaging)
3047 fVal |= VMX_PROC_CTLS2_EPT;
3048
3049 /* Enable the INVPCID instruction if supported by the hardware and we expose
3050 it to the guest. Without this, a guest executing INVPCID would cause a #UD. */
3051 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID)
3052 && pVM->cpum.ro.GuestFeatures.fInvpcid)
3053 fVal |= VMX_PROC_CTLS2_INVPCID;
3054
3055 /* Enable VPID. */
3056 if (pVM->hm.s.vmx.fVpid)
3057 fVal |= VMX_PROC_CTLS2_VPID;
3058
3059 /* Enable unrestricted guest execution. */
3060 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3061 fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
3062
3063#if 0
3064 if (pVM->hm.s.fVirtApicRegs)
3065 {
3066 /* Enable APIC-register virtualization. */
3067 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
3068 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
3069
3070 /* Enable virtual-interrupt delivery. */
3071 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
3072 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
3073 }
3074#endif
3075
3076 /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is where the TPR shadow resides. */
3077 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
3078 * done dynamically. */
3079 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3080 {
3081 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
3082 int rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
3083 AssertRCReturn(rc, rc);
3084 }
3085
3086 /* Enable the RDTSCP instruction if supported by the hardware and we expose
3087 it to the guest. Without this, a guest executing RDTSCP would cause a #UD. */
3088 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP)
3089 && pVM->cpum.ro.GuestFeatures.fRdTscP)
3090 fVal |= VMX_PROC_CTLS2_RDTSCP;
3091
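    /* Rough PLE semantics (Intel spec): PLE_Gap is the largest interval, in TSC ticks,
       between two PAUSEs at CPL 0 for them to count as the same spin loop; PLE_Window is
       how long such a loop may spin before a PAUSE triggers a VM-exit. The actual values
       come from the ring-3 configuration (cPleGapTicks/cPleWindowTicks). */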
3092 /* Enable Pause-Loop exiting. */
3093 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3094 && pVM->hm.s.vmx.cPleGapTicks
3095 && pVM->hm.s.vmx.cPleWindowTicks)
3096 {
3097 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
3098
3099 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
3100 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
3101 AssertRCReturn(rc, rc);
3102 }
3103
3104 if ((fVal & fZap) != fVal)
3105 {
3106 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3107 pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed0, fVal, fZap));
3108 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
3109 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3110 }
3111
3112 /* Commit it to the VMCS and update our cache. */
3113 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
3114 AssertRCReturn(rc, rc);
3115 pVmcsInfo->u32ProcCtls2 = fVal;
3116
3117 return VINF_SUCCESS;
3118}
3119
3120
3121/**
3122 * Sets up processor-based VM-execution controls in the VMCS.
3123 *
3124 * @returns VBox status code.
3125 * @param pVCpu The cross context virtual CPU structure.
3126 * @param pVmcsInfo The VMCS info. object.
3127 */
3128static int hmR0VmxSetupVmcsProcCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3129{
3130 PVM pVM = pVCpu->CTX_SUFF(pVM);
3131
3132 uint32_t fVal = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
3133 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3134
3135 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
3136 | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
3137 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
3138 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
3139 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
3140 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
3141 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
3142
3143 /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later; verify here that the CPU allows it to be both set and cleared (i.e. it is not a fixed bit). */
3144 if ( !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
3145 || (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
3146 {
3147 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
3148 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3149 }
3150
3151 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
3152 if (!pVM->hm.s.fNestedPaging)
3153 {
3154 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
3155 fVal |= VMX_PROC_CTLS_INVLPG_EXIT
3156 | VMX_PROC_CTLS_CR3_LOAD_EXIT
3157 | VMX_PROC_CTLS_CR3_STORE_EXIT;
3158 }
3159
3160 /* Use TPR shadowing if supported by the CPU. */
3161 if ( PDMHasApic(pVM)
3162 && pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
3163 {
3164 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
3165 /* CR8 writes cause a VM-exit based on TPR threshold. */
3166 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
3167 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
3168 int rc = hmR0VmxSetupVmcsVirtApicAddr(pVCpu, pVmcsInfo);
3169 AssertRCReturn(rc, rc);
3170 }
3171 else
3172 {
3173 /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is
3174 invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */
3175 if (pVM->hm.s.fAllow64BitGuests)
3176 {
3177 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
3178 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
3179 }
3180 }
3181
3182 /* Use MSR-bitmaps if supported by the CPU. */
3183 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3184 {
3185 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
3186 int rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVCpu, pVmcsInfo);
3187 AssertRCReturn(rc, rc);
3188 }
3189
3190 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
3191 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3192 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
3193
3194 if ((fVal & fZap) != fVal)
3195 {
3196 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3197 pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0, fVal, fZap));
3198 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
3199 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3200 }
3201
3202 /* Commit it to the VMCS and update our cache. */
3203 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
3204 AssertRCReturn(rc, rc);
3205 pVmcsInfo->u32ProcCtls = fVal;
3206
3207 /* Set up MSR permissions that don't change through the lifetime of the VM. */
3208 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3209 hmR0VmxSetupVmcsMsrPermissions(pVCpu, pVmcsInfo, false /* fIsNstGstVmcs */);
3210
3211 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
3212 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3213 return hmR0VmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
3214
3215 /* Sanity check, should not really happen. */
3216 if (RT_LIKELY(!pVM->hm.s.vmx.fUnrestrictedGuest))
3217 { /* likely */ }
3218 else
3219 {
3220 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
3221 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3222 }
3223
3224 /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
3225 return VINF_SUCCESS;
3226}
3227
3228
3229/**
3230 * Sets up miscellaneous (everything other than Pin, Processor and secondary
3231 * Processor-based VM-execution) control fields in the VMCS.
3232 *
3233 * @returns VBox status code.
3234 * @param pVCpu The cross context virtual CPU structure.
3235 * @param pVmcsInfo The VMCS info. object.
3236 */
3237static int hmR0VmxSetupVmcsMiscCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3238{
3239 /* Set the auto-load/store MSR area addresses in the VMCS. */
3240 int rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVCpu, pVmcsInfo);
3241 if (RT_SUCCESS(rc))
3242 {
3243 /* Set the VMCS link pointer in the VMCS. */
3244 rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
3245 if (RT_SUCCESS(rc))
3246 {
3247 /* Set the CR0/CR4 guest/host mask. */
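            /* A bit set in a guest/host mask is owned by the host: guest reads of it return the
               read-shadow value and guest writes to it cause a VM-exit; clear bits are owned by
               the guest (summary of the Intel spec; the masks come from the fixed-mask helpers). */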
3248 uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
3249 uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);
3250 rc = VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
3251 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
3252 if (RT_SUCCESS(rc))
3253 {
3254 pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
3255 pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
3256 return VINF_SUCCESS;
3257 }
3258 LogRelFunc(("Failed to initialize VMCS CR0/CR4 guest/host mask. rc=%Rrc\n", rc));
3259 }
3260 else
3261 LogRelFunc(("Failed to initialize VMCS link pointer. rc=%Rrc\n", rc));
3262 }
3263 else
3264 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
3265 return rc;
3266}
3267
3268
3269/**
3270 * Sets up the initial exception bitmap in the VMCS based on static conditions.
3271 *
3272 * We set up here those exception intercepts that don't change during the
3273 * lifetime of the VM. The rest are done dynamically while loading the
3274 * guest state.
3275 *
3276 * @returns VBox status code.
3277 * @param pVCpu The cross context virtual CPU structure.
3278 * @param pVmcsInfo The VMCS info. object.
3279 */
3280static int hmR0VmxSetupVmcsXcptBitmap(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3281{
3282 /*
3283 * The following exceptions are always intercepted:
3284 *
3285 * #AC - To prevent the guest from hanging the CPU.
3286 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes, and
3287 * because recursive #DBs can cause a CPU hang.
3288 * #PF - To sync our shadow page tables when nested-paging is not used.
3289 */
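    /* Bit n of the exception bitmap corresponds to vector n; a set bit makes that exception
       cause a VM-exit (RT_BIT(X86_XCPT_PF) is bit 14, the #PF vector). For #PF, the
       page-fault error-code mask/match VMCS fields further filter which faults actually
       exit; those fields, where used, are managed elsewhere. */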
3290 bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
3291 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
3292 | RT_BIT(X86_XCPT_DB)
3293 | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF));
3294
3295 /* Commit it to the VMCS. */
3296 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3297 AssertRCReturn(rc, rc);
3298
3299 /* Update our cache of the exception bitmap. */
3300 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
3301 return VINF_SUCCESS;
3302}
3303
3304
3305#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3306/**
3307 * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX.
3308 *
3309 * @returns VBox status code.
3310 * @param pVCpu The cross context virtual CPU structure.
3311 * @param pVmcsInfo The VMCS info. object.
3312 */
3313static int hmR0VmxSetupVmcsCtlsNested(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3314{
3315 PVM pVM = pVCpu->CTX_SUFF(pVM);
3316 int rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
3317 if (RT_SUCCESS(rc))
3318 {
3319 rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVCpu, pVmcsInfo);
3320 if (RT_SUCCESS(rc))
3321 {
3322 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3323 rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVCpu, pVmcsInfo);
3324 if (RT_SUCCESS(rc))
3325 {
3326 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3327 rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
3328 if (RT_SUCCESS(rc))
3329 return VINF_SUCCESS;
3330
3331 LogRelFunc(("Failed to set up the APIC-access address in the nested-guest VMCS. rc=%Rrc\n", rc));
3332 }
3333 else
3334 LogRelFunc(("Failed to set up the MSR-bitmap address in the nested-guest VMCS. rc=%Rrc\n", rc));
3335 }
3336 else
3337 LogRelFunc(("Failed to set up the auto-load/store MSR addresses in the nested-guest VMCS. rc=%Rrc\n", rc));
3338 }
3339 else
3340 LogRelFunc(("Failed to set up the VMCS link pointer in the nested-guest VMCS. rc=%Rrc\n", rc));
3341
3342 return rc;
3343}
3344#endif
3345
3346
3347/**
3348 * Sets up the VMCS for executing a guest (or nested-guest) using hardware-assisted
3349 * VMX.
3350 *
3351 * @returns VBox status code.
3352 * @param pVCpu The cross context virtual CPU structure.
3353 * @param pVmcsInfo The VMCS info. object.
3354 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
3355 */
3356static int hmR0VmxSetupVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
3357{
3358 Assert(pVmcsInfo);
3359 Assert(pVmcsInfo->pvVmcs);
3360 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3361
3362 /* Set the CPU specified revision identifier at the beginning of the VMCS structure. */
3363 PVM pVM = pVCpu->CTX_SUFF(pVM);
3364 *(uint32_t *)pVmcsInfo->pvVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
3365 const char * const pszVmcs = fIsNstGstVmcs ? "nested-guest VMCS" : "guest VMCS";
3366
3367 LogFlowFunc(("\n"));
3368
3369 /*
3370 * Initialize the VMCS using VMCLEAR before loading the VMCS.
3371 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
3372 */
3373 int rc = hmR0VmxClearVmcs(pVmcsInfo);
3374 if (RT_SUCCESS(rc))
3375 {
3376 rc = hmR0VmxLoadVmcs(pVmcsInfo);
3377 if (RT_SUCCESS(rc))
3378 {
3379 if (!fIsNstGstVmcs)
3380 {
3381 rc = hmR0VmxSetupVmcsPinCtls(pVCpu, pVmcsInfo);
3382 if (RT_SUCCESS(rc))
3383 {
3384 rc = hmR0VmxSetupVmcsProcCtls(pVCpu, pVmcsInfo);
3385 if (RT_SUCCESS(rc))
3386 {
3387 rc = hmR0VmxSetupVmcsMiscCtls(pVCpu, pVmcsInfo);
3388 if (RT_SUCCESS(rc))
3389 {
3390 rc = hmR0VmxSetupVmcsXcptBitmap(pVCpu, pVmcsInfo);
3391 if (RT_SUCCESS(rc))
3392 { /* likely */ }
3393 else
3394 LogRelFunc(("Failed to initialize exception bitmap. rc=%Rrc\n", rc));
3395 }
3396 else
3397 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
3398 }
3399 else
3400 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
3401 }
3402 else
3403 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
3404 }
3405 else
3406 {
3407#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3408 rc = hmR0VmxSetupVmcsCtlsNested(pVCpu, pVmcsInfo);
3409 if (RT_SUCCESS(rc))
3410 { /* likely */ }
3411 else
3412 LogRelFunc(("Failed to initialize nested-guest VMCS. rc=%Rrc\n", rc));
3413#else
3414 AssertFailed();
3415#endif
3416 }
3417 }
3418 else
3419 LogRelFunc(("Failed to load the %s. rc=%Rrc\n", pszVmcs, rc));
3420 }
3421 else
3422 LogRelFunc(("Failed to clear the %s. rc=%Rrc\n", pszVmcs, rc));
3423
3424 /* Sync any CPU internal VMCS data back into our VMCS in memory. */
3425 if (RT_SUCCESS(rc))
3426 {
3427 rc = hmR0VmxClearVmcs(pVmcsInfo);
3428 if (RT_SUCCESS(rc))
3429 { /* likely */ }
3430 else
3431 LogRelFunc(("Failed to clear the %s post setup. rc=%Rrc\n", pszVmcs, rc));
3432 }
3433
3434 /*
3435 * Update the last-error record both for failures and success, so we
3436 * can propagate the status code back to ring-3 for diagnostics.
3437 */
3438 hmR0VmxUpdateErrorRecord(pVCpu, rc);
3439 NOREF(pszVmcs);
3440 return rc;
3441}
3442
3443
3444/**
3445 * Does global VT-x initialization (called during module initialization).
3446 *
3447 * @returns VBox status code.
3448 */
3449VMMR0DECL(int) VMXR0GlobalInit(void)
3450{
3451#ifdef HMVMX_USE_FUNCTION_TABLE
3452 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
3453# ifdef VBOX_STRICT
3454 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
3455 Assert(g_apfnVMExitHandlers[i]);
3456# endif
3457#endif
3458 return VINF_SUCCESS;
3459}
3460
3461
3462/**
3463 * Does global VT-x termination (called during module termination).
3464 */
3465VMMR0DECL(void) VMXR0GlobalTerm()
3466{
3467 /* Nothing to do currently. */
3468}
3469
3470
3471/**
3472 * Sets up and activates VT-x on the current CPU.
3473 *
3474 * @returns VBox status code.
3475 * @param pHostCpu The HM physical-CPU structure.
3476 * @param pVM The cross context VM structure. Can be
3477 * NULL after a host resume operation.
3478 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
3479 * fEnabledByHost is @c true).
3480 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
3481 * @a fEnabledByHost is @c true).
3482 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
3483 * enable VT-x on the host.
3484 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs.
3485 */
3486VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
3487 PCSUPHWVIRTMSRS pHwvirtMsrs)
3488{
3489 Assert(pHostCpu);
3490 Assert(pHwvirtMsrs);
3491 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3492
3493 /* Enable VT-x if it's not already enabled by the host. */
3494 if (!fEnabledByHost)
3495 {
3496 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
3497 if (RT_FAILURE(rc))
3498 return rc;
3499 }
3500
3501 /*
3502 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been
3503 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
3504 * invalidated when flushing by VPID.
3505 */
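    /* All-contexts INVEPT invalidates guest-physical and combined mappings for every EPTP,
       whereas the single-context variant covers only one. Flushing everything here, before
       any of our EPTPs is active, is the conservative choice. */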
3506 if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
3507 {
3508 hmR0VmxFlushEpt(NULL /* pVCpu */, NULL /* pVmcsInfo */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
3509 pHostCpu->fFlushAsidBeforeUse = false;
3510 }
3511 else
3512 pHostCpu->fFlushAsidBeforeUse = true;
3513
3514 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
3515 ++pHostCpu->cTlbFlushes;
3516
3517 return VINF_SUCCESS;
3518}
3519
3520
3521/**
3522 * Deactivates VT-x on the current CPU.
3523 *
3524 * @returns VBox status code.
3525 * @param pvCpuPage Pointer to the VMXON region.
3526 * @param HCPhysCpuPage Physical address of the VMXON region.
3527 *
3528 * @remarks This function should never be called when SUPR0EnableVTx() or
3529 * similar was used to enable VT-x on the host.
3530 */
3531VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
3532{
3533 RT_NOREF2(pvCpuPage, HCPhysCpuPage);
3534
3535 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3536 return hmR0VmxLeaveRootMode();
3537}
3538
3539
3540/**
3541 * Does per-VM VT-x initialization.
3542 *
3543 * @returns VBox status code.
3544 * @param pVM The cross context VM structure.
3545 */
3546VMMR0DECL(int) VMXR0InitVM(PVM pVM)
3547{
3548 LogFlowFunc(("pVM=%p\n", pVM));
3549
3550 int rc = hmR0VmxStructsAlloc(pVM);
3551 if (RT_FAILURE(rc))
3552 {
3553 LogRelFunc(("Failed to allocate VMX structures. rc=%Rrc\n", rc));
3554 return rc;
3555 }
3556
3557 return VINF_SUCCESS;
3558}
3559
3560
3561/**
3562 * Does per-VM VT-x termination.
3563 *
3564 * @returns VBox status code.
3565 * @param pVM The cross context VM structure.
3566 */
3567VMMR0DECL(int) VMXR0TermVM(PVM pVM)
3568{
3569 LogFlowFunc(("pVM=%p\n", pVM));
3570
3571#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3572 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
3573 {
3574 Assert(pVM->hm.s.vmx.pvScratch);
3575 ASMMemZero32(pVM->hm.s.vmx.pvScratch, X86_PAGE_4K_SIZE);
3576 }
3577#endif
3578 hmR0VmxStructsFree(pVM);
3579 return VINF_SUCCESS;
3580}
3581
3582
3583/**
3584 * Sets up the VM for execution using hardware-assisted VMX.
3585 * This function is only called once per-VM during initialization.
3586 *
3587 * @returns VBox status code.
3588 * @param pVM The cross context VM structure.
3589 */
3590VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
3591{
3592 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
3593 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3594
3595 LogFlowFunc(("pVM=%p\n", pVM));
3596
3597 /*
3598 * At least verify if VMX is enabled, since we can't check if we're in
3599 * VMX root mode or not without causing a #GP.
3600 */
3601 RTCCUINTREG const uHostCR4 = ASMGetCR4();
3602 if (RT_LIKELY(uHostCR4 & X86_CR4_VMXE))
3603 { /* likely */ }
3604 else
3605 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
3606
3607 /*
3608 * Without unrestricted guest execution, pRealModeTSS and pNonPagingModeEPTPageTable *must*
3609 * always be allocated. We no longer support the highly unlikely case of unrestricted guest
3610 * without pRealModeTSS, see hmR3InitFinalizeR0Intel().
3611 */
3612 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3613 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
3614 || !pVM->hm.s.vmx.pRealModeTSS))
3615 {
3616 LogRelFunc(("Invalid real-on-v86 state.\n"));
3617 return VERR_INTERNAL_ERROR;
3618 }
3619
3620 /* Initialize these always, see hmR3InitFinalizeR0().*/
3621 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NONE;
3622 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
3623
3624 /* Setup the tagged-TLB flush handlers. */
3625 int rc = hmR0VmxSetupTaggedTlb(pVM);
3626 if (RT_FAILURE(rc))
3627 {
3628 LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
3629 return rc;
3630 }
3631
3632 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
3633 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
3634#if HC_ARCH_BITS == 64
3635 if ( (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
3636 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR)
3637 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR))
3638 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
3639#endif
3640
3641 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3642 {
3643 PVMCPU pVCpu = &pVM->aCpus[idCpu];
3644 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
3645
3646 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
3647 if (RT_SUCCESS(rc))
3648 {
3649#if HC_ARCH_BITS == 32
3650 hmR0VmxInitVmcsReadCache(pVCpu);
3651#endif
3652#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3653 if (pVM->cpum.ro.GuestFeatures.fVmx)
3654 {
3655 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
3656 if (RT_SUCCESS(rc))
3657 { /* likely */ }
3658 else
3659 {
3660 LogRelFunc(("Nested-guest VMCS setup failed. rc=%Rrc\n", rc));
3661 return rc;
3662 }
3663 }
3664#endif
3665 }
3666 else
3667 {
3668 LogRelFunc(("VMCS setup failed. rc=%Rrc\n", rc));
3669 return rc;
3670 }
3671 }
3672
3673 return VINF_SUCCESS;
3674}
3675
3676
3677#if HC_ARCH_BITS == 32
3678# ifdef VBOX_ENABLE_64_BITS_GUESTS
3679/**
3680 * Check if guest state allows safe use of 32-bit switcher again.
3681 *
3682 * Segment bases and protected mode structures must be 32-bit addressable
3683 * because the 32-bit switcher will ignore high dword when writing these VMCS
3684 * fields. See @bugref{8432} for details.
3685 *
3686 * @returns true if safe, false if must continue to use the 64-bit switcher.
3687 * @param pCtx Pointer to the guest-CPU context.
3688 *
3689 * @remarks No-long-jump zone!!!
3690 */
3691static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx)
3692{
3693 if (pCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false;
3694 if (pCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false;
3695 if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false;
3696 if (pCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false;
3697 if (pCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false;
3698 if (pCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false;
3699 if (pCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false;
3700 if (pCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false;
3701 if (pCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false;
3702 if (pCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false;
3703
3704 /* All good, bases are 32-bit. */
3705 return true;
3706}
3707# endif /* VBOX_ENABLE_64_BITS_GUESTS */
3708
3709# ifdef VBOX_STRICT
3710static bool hmR0VmxIsValidWriteField(uint32_t idxField)
3711{
3712 switch (idxField)
3713 {
3714 case VMX_VMCS_GUEST_RIP:
3715 case VMX_VMCS_GUEST_RSP:
3716 case VMX_VMCS_GUEST_SYSENTER_EIP:
3717 case VMX_VMCS_GUEST_SYSENTER_ESP:
3718 case VMX_VMCS_GUEST_GDTR_BASE:
3719 case VMX_VMCS_GUEST_IDTR_BASE:
3720 case VMX_VMCS_GUEST_CS_BASE:
3721 case VMX_VMCS_GUEST_DS_BASE:
3722 case VMX_VMCS_GUEST_ES_BASE:
3723 case VMX_VMCS_GUEST_FS_BASE:
3724 case VMX_VMCS_GUEST_GS_BASE:
3725 case VMX_VMCS_GUEST_SS_BASE:
3726 case VMX_VMCS_GUEST_LDTR_BASE:
3727 case VMX_VMCS_GUEST_TR_BASE:
3728 case VMX_VMCS_GUEST_CR3:
3729 return true;
3730 }
3731 return false;
3732}
3733
3734static bool hmR0VmxIsValidReadField(uint32_t idxField)
3735{
3736 switch (idxField)
3737 {
3738 /* Read-only fields. */
3739 case VMX_VMCS_RO_EXIT_QUALIFICATION:
3740 return true;
3741 }
3742 /* Remaining readable fields should also be writable. */
3743 return hmR0VmxIsValidWriteField(idxField);
3744}
3745# endif /* VBOX_STRICT */
3746
3747
3748/**
3749 * Executes the specified handler in 64-bit mode.
3750 *
3751 * @returns VBox status code (no informational status codes).
3752 * @param pVCpu The cross context virtual CPU structure.
3753 * @param enmOp The operation to perform.
3754 * @param cParams Number of parameters.
3755 * @param paParam Array of 32-bit parameters.
3756 */
3757VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
3758{
3759 PVM pVM = pVCpu->CTX_SUFF(pVM);
3760 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
3761 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
3762 Assert(pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Write.aField));
3763 Assert(pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Read.aField));
3764
3765#ifdef VBOX_STRICT
3766 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries; i++)
3767 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VmcsCache.Write.aField[i]));
3768
3769 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries; i++)
3770 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VmcsCache.Read.aField[i]));
3771#endif
3772
3773 /* Disable interrupts. */
3774 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
3775
3776#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
3777 RTCPUID idHostCpu = RTMpCpuId();
3778 CPUMR0SetLApic(pVCpu, idHostCpu);
3779#endif
3780
3781 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
3782
3783 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
3784 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
3785
3786 /* Clear the VMCS: this marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
3787 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
3788 hmR0VmxClearVmcs(pVmcsInfo);
3789
3790 /* Leave VMX root mode and disable VMX. */
3791 VMXDisable();
3792 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
3793
3794 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
3795 CPUMSetHyperEIP(pVCpu, enmOp);
3796 for (int i = (int)cParams - 1; i >= 0; i--)
3797 CPUMPushHyper(pVCpu, paParam[i]);
3798
3799 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
3800
3801 /* Call the switcher. */
3802 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_UOFFSETOF_DYN(VM, aCpus[pVCpu->idCpu].cpum) - RT_UOFFSETOF(VM, cpum));
3803 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
3804
3805 /* Re-enable VMX to make sure the VMX instructions don't cause #UD faults. */
3806 SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
3807
3808 /* Re-enter VMX root mode. */
3809 int rc2 = VMXEnable(HCPhysCpuPage);
3810 if (RT_FAILURE(rc2))
3811 {
3812 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
3813 ASMSetFlags(fOldEFlags);
3814 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
3815 return rc2;
3816 }
3817
3818 /* Restore the VMCS as the current VMCS. */
3819 rc2 = hmR0VmxLoadVmcs(pVmcsInfo);
3820 AssertRC(rc2);
3821 Assert(!(ASMGetFlags() & X86_EFL_IF));
3822 ASMSetFlags(fOldEFlags);
3823 return rc;
3824}
3825
3826
3827/**
3828 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
3829 * supporting 64-bit guests.
3830 *
3831 * @returns VBox status code.
3832 * @param fResume Whether to VMLAUNCH or VMRESUME.
3833 * @param pCtx Pointer to the guest-CPU context.
3834 * @param pCache Pointer to the VMCS batch cache.
3835 * @param pVM The cross context VM structure.
3836 * @param pVCpu The cross context virtual CPU structure.
3837 */
3838DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
3839{
3840 NOREF(fResume);
3841
3842 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
3843 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
3844 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
3845
3846#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3847 pCache->uPos = 1;
3848 pCache->interPD = PGMGetInterPaeCR3(pVM);
3849 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
3850#endif
3851
3852#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
3853 pCache->TestIn.HCPhysCpuPage = 0;
3854 pCache->TestIn.HCPhysVmcs = 0;
3855 pCache->TestIn.pCache = 0;
3856 pCache->TestOut.HCPhysVmcs = 0;
3857 pCache->TestOut.pCache = 0;
3858 pCache->TestOut.pCtx = 0;
3859 pCache->TestOut.eflags = 0;
3860#else
3861 NOREF(pCache);
3862#endif
3863
3864 uint32_t aParam[10];
3865 aParam[0] = RT_LO_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
3866 aParam[1] = RT_HI_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Hi. */
3867 aParam[2] = RT_LO_U32(pVmcsInfo->HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
3868 aParam[3] = RT_HI_U32(pVmcsInfo->HCPhysVmcs); /* Param 2: VMCS physical address - Hi. */
3869 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache);
3870 aParam[5] = 0;
3871 aParam[6] = VM_RC_ADDR(pVM, pVM);
3872 aParam[7] = 0;
3873 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
3874 aParam[9] = 0;
3875
3876#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3877 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
3878 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
3879#endif
3880 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
3881
3882#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3883 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
3884 Assert(pCtx->dr[4] == 10);
3885 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
3886#endif
3887
3888#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
3889 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
3890 AssertMsg(pCache->TestIn.HCPhysVmcs == pVmcsInfo->HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
3891 pVmcsInfo->HCPhysVmcs));
3892 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
3893 pCache->TestOut.HCPhysVmcs));
3894 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
3895 pCache->TestOut.pCache));
3896 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache),
3897 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache)));
3898 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
3899 pCache->TestOut.pCtx));
3900 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
3901#endif
3902 NOREF(pCtx);
3903 return rc;
3904}
3905#endif
3906
3907
3908/**
3909 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
3910 * the VMCS.
3911 *
3912 * @returns VBox status code.
3913 */
3914static int hmR0VmxExportHostControlRegs(void)
3915{
3916 RTCCUINTREG uReg = ASMGetCR0();
3917 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
3918 AssertRCReturn(rc, rc);
3919
3920 uReg = ASMGetCR3();
3921 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
3922 AssertRCReturn(rc, rc);
3923
3924 uReg = ASMGetCR4();
3925 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
3926 AssertRCReturn(rc, rc);
3927 return rc;
3928}
3929
3930
3931/**
3932 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
3933 * the host-state area in the VMCS.
3934 *
3935 * @returns VBox status code.
3936 * @param pVCpu The cross context virtual CPU structure.
3937 */
3938static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu)
3939{
3940#if HC_ARCH_BITS == 64
3941/**
3942 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
3943 * requirements. See hmR0VmxExportHostSegmentRegs().
3944 */
3945# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
3946 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
3947 { \
3948 bool fValidSelector = true; \
3949 if ((selValue) & X86_SEL_LDT) \
3950 { \
3951 uint32_t uAttr = ASMGetSegAttr((selValue)); \
3952 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
3953 } \
3954 if (fValidSelector) \
3955 { \
3956 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
3957 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
3958 } \
3959 (selValue) = 0; \
3960 }
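/* Rationale for the macro above: the VM-entry host-state checks require the RPL and TI bits
   of the host segment selectors to be zero. Selectors that do not qualify are loaded as null
   for the VM-entry and the real values are stashed in RestoreHost so they can be reloaded
   before anything else runs on this CPU. */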
3961
3962 /*
3963 * If we've executed guest code using hardware-assisted VMX, the host-state bits
3964 * will be messed up. We should -not- save the messed up state without restoring
3965 * the original host-state, see @bugref{7240}.
3966 *
3967 * This apparently can happen (most likely the FPU changes), deal with it rather than
3968 * asserting. Was observed booting Solaris 10u10 32-bit guest.
3969 */
3970 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
3971 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
3972 {
3973 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags,
3974 pVCpu->idCpu));
3975 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
3976 }
3977 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
3978#else
3979 RT_NOREF(pVCpu);
3980#endif
3981
3982 /*
3983 * Host DS, ES, FS and GS segment registers.
3984 */
3985#if HC_ARCH_BITS == 64
3986 RTSEL uSelDS = ASMGetDS();
3987 RTSEL uSelES = ASMGetES();
3988 RTSEL uSelFS = ASMGetFS();
3989 RTSEL uSelGS = ASMGetGS();
3990#else
3991 RTSEL uSelDS = 0;
3992 RTSEL uSelES = 0;
3993 RTSEL uSelFS = 0;
3994 RTSEL uSelGS = 0;
3995#endif
3996
3997 /*
3998 * Host CS and SS segment registers.
3999 */
4000 RTSEL uSelCS = ASMGetCS();
4001 RTSEL uSelSS = ASMGetSS();
4002
4003 /*
4004 * Host TR segment register.
4005 */
4006 RTSEL uSelTR = ASMGetTR();
4007
4008#if HC_ARCH_BITS == 64
4009 /*
4010 * Determine if the host segment registers are suitable for VT-x. Otherwise load null
4011 * selectors so that VM-entry succeeds, and restore the originals before we get preempted.
4012 *
4013 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
4014 */
4015 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
4016 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
4017 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
4018 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
4019# undef VMXLOCAL_ADJUST_HOST_SEG
4020#endif
4021
4022 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
4023 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
4024 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
4025 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
4026 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
4027 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
4028 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
4029 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
4030 Assert(uSelCS);
4031 Assert(uSelTR);
4032
4033 /* Write these host selector fields into the host-state area in the VMCS. */
4034 int rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
4035 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
4036#if HC_ARCH_BITS == 64
4037 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
4038 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
4039 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
4040 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
4041#else
4042 NOREF(uSelDS);
4043 NOREF(uSelES);
4044 NOREF(uSelFS);
4045 NOREF(uSelGS);
4046#endif
4047 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
4048 AssertRCReturn(rc, rc);
4049
4050 /*
4051 * Host GDTR and IDTR.
4052 */
4053 RTGDTR Gdtr;
4054 RTIDTR Idtr;
4055 RT_ZERO(Gdtr);
4056 RT_ZERO(Idtr);
4057 ASMGetGDTR(&Gdtr);
4058 ASMGetIDTR(&Idtr);
4059 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
4060 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
4061 AssertRCReturn(rc, rc);
4062
4063#if HC_ARCH_BITS == 64
4064 /*
4065 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps
4066 * them to the maximum limit (0xffff) on every VM-exit.
4067 */
4068 if (Gdtr.cbGdt != 0xffff)
4069 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
4070
4071 /*
4072 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
4073 * Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit
4074 * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
4075 * However, several hosts either insists on 0xfff being the limit (Windows Patch Guard) or
4076 * uses the limit for other purposes (darwin puts the CPU ID in there but botches sidt
4077 * alignment in at least one consumer). So, we're only allowing the IDTR.LIMIT to be left
4078 * at 0xffff on hosts where we are sure it won't cause trouble.
4079 */
4080# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
4081 if (Idtr.cbIdt < 0x0fff)
4082# else
4083 if (Idtr.cbIdt != 0xffff)
4084# endif
4085 {
4086 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
4087 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
4088 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
4089 }
4090#endif
4091
4092 /*
4093 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI
4094 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and
4095 * RPL should be too in most cases.
4096 */
4097 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
4098 ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE);
4099
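    /* For illustration: a TR selector of, say, 0x40 has index 8, so its descriptor lives at
       Gdtr.pGdt + 0x40; masking with X86_SEL_MASK (clearing the RPL/TI bits) yields exactly
       that byte offset, which is what the lookup below relies on. */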
4100 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
4101#if HC_ARCH_BITS == 64
4102 uintptr_t const uTRBase = X86DESC64_BASE(pDesc);
4103
4104 /*
4105 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on
4106 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual
4107 * restoration if the host has something else. Task switching is not supported in 64-bit
4108 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the
4109 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
4110 *
4111 * [1] See Intel spec. 3.5 "System Descriptor Types".
4112 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
4113 */
4114 PVM pVM = pVCpu->CTX_SUFF(pVM);
4115 Assert(pDesc->System.u4Type == 11);
4116 if ( pDesc->System.u16LimitLow != 0x67
4117 || pDesc->System.u4LimitHigh)
4118 {
4119 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
4120 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
4121 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
4122 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
4123 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
4124 }
4125
4126 /*
4127 * Store the GDTR as we need it when restoring the GDT and while restoring the TR.
4128 */
4129 if (pVCpu->hm.s.vmx.fRestoreHostFlags & (VMX_RESTORE_HOST_GDTR | VMX_RESTORE_HOST_SEL_TR))
4130 {
4131 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
4132 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
4133 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_NEED_WRITABLE)
4134 {
4135 /* The GDT is read-only but the writable GDT is available. */
4136 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE;
4137 pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = Gdtr.cbGdt;
4138 rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr);
4139 AssertRCReturn(rc, rc);
4140 }
4141 }
4142#else
4143 uintptr_t const uTRBase = X86DESC_BASE(pDesc);
4144#endif
4145 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
4146 AssertRCReturn(rc, rc);
4147
4148 /*
4149 * Host FS base and GS base.
4150 */
4151#if HC_ARCH_BITS == 64
4152 uint64_t const u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
4153 uint64_t const u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
4154 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
4155 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
4156 AssertRCReturn(rc, rc);
4157
4158 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
4159 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
4160 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
4161 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
4162 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
4163#endif
4164 return VINF_SUCCESS;
4165}
4166
4167
4168/**
4169 * Exports certain host MSRs in the VM-exit MSR-load area and some in the
4170 * host-state area of the VMCS.
4171 *
4172 * These MSRs will be automatically restored on the host after every successful
4173 * VM-exit.
4174 *
4175 * @returns VBox status code.
4176 * @param pVCpu The cross context virtual CPU structure.
4177 *
4178 * @remarks No-long-jump zone!!!
4179 */
4180static int hmR0VmxExportHostMsrs(PVMCPU pVCpu)
4181{
4182 AssertPtr(pVCpu);
4183
4184 /*
4185 * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
4186 * rather than swapping them on every VM-entry.
4187 */
4188 hmR0VmxLazySaveHostMsrs(pVCpu);
4189
4190 /*
4191 * Host Sysenter MSRs.
4192 */
4193 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
4194#if HC_ARCH_BITS == 32
4195 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
4196 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
4197#else
4198 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
4199 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
4200#endif
4201 AssertRCReturn(rc, rc);
4202
4203 /*
4204 * Host EFER MSR.
4205 *
4206 * If the CPU supports the newer VMCS controls for managing EFER, use it. Otherwise it's
4207 * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs().
4208 */
4209 PVM pVM = pVCpu->CTX_SUFF(pVM);
4210 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4211 {
4212 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostMsrEfer);
4213 AssertRCReturn(rc, rc);
4214 }
4215
4216 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
4217 * hmR0VmxExportGuestEntryExitCtls(). */
4218
4219 return VINF_SUCCESS;
4220}
4221
4222
4223/**
4224 * Figures out if we need to swap the EFER MSR which is particularly expensive.
4225 *
4226 * We check all relevant bits. For now, that's everything besides LMA/LME, as
4227 * these two bits are handled by VM-entry, see hmR0VmxExportGuestEntryExitCtls().
4228 *
4229 * @returns true if we need to load guest EFER, false otherwise.
4230 * @param pVCpu The cross context virtual CPU structure.
4231 *
4232 * @remarks Requires EFER, CR4.
4233 * @remarks No-long-jump zone!!!
4234 */
4235static bool hmR0VmxShouldSwapEferMsr(PCVMCPU pVCpu)
4236{
4237#ifdef HMVMX_ALWAYS_SWAP_EFER
4238 RT_NOREF(pVCpu);
4239 return true;
4240#else
4241 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4242#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
4243 /* For 32-bit hosts running 64-bit guests, we always swap EFER MSR in the world-switcher. Nothing to do here. */
4244 if (CPUMIsGuestInLongModeEx(pCtx))
4245 return false;
4246#endif
4247
4248 PVM pVM = pVCpu->CTX_SUFF(pVM);
4249 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64HostMsrEfer;
4250 uint64_t const u64GuestEfer = pCtx->msrEFER;
4251
4252 /*
4253 * For 64-bit guests, if EFER.SCE bit differs, we need to swap the EFER MSR
4254 * to ensure that the guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
4255 */
4256 if ( CPUMIsGuestInLongModeEx(pCtx)
4257 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
4258 return true;
4259
4260 /*
4261 * If the guest uses PAE and EFER.NXE bit differs, we need to swap the EFER MSR
4262 * as it affects guest paging. 64-bit paging implies CR4.PAE as well.
4263 *
4264 * See Intel spec. 4.5 "IA-32e Paging".
4265 * See Intel spec. 4.1.1 "Three Paging Modes".
4266 *
4267 * Verify that we always intercept CR4.PAE and CR0.PG bits, so we don't need to
4268 * import CR4 and CR0 from the VMCS here as those bits are always up to date.
4269 */
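    /* Concrete example: a PAE/long-mode guest with EFER.NXE=1 running on host EFER.NXE=0
       (without swapping) would have the XD bit in its page tables treated as a reserved
       bit, producing unexpected #PFs; the converse would hide the faults the guest expects. */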
4270 Assert(hmR0VmxGetFixedCr4Mask(pVCpu) & X86_CR4_PAE);
4271 Assert(hmR0VmxGetFixedCr0Mask(pVCpu) & X86_CR0_PG);
4272 if ( (pCtx->cr4 & X86_CR4_PAE)
4273 && (pCtx->cr0 & X86_CR0_PG)
4274 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
4275 {
4276 /* Assert that host is NX capable. */
4277 Assert(pVCpu->CTX_SUFF(pVM)->cpum.ro.HostFeatures.fNoExecute);
4278 return true;
4279 }
4280
4281 return false;
4282#endif
4283}
4284
4285/**
4286 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
4287 * VMCS.
4288 *
4289 * This is typically required when the guest changes paging mode.
4290 *
4291 * @returns VBox status code.
4292 * @param pVCpu The cross context virtual CPU structure.
4293 * @param pVmxTransient The VMX-transient structure.
4294 *
4295 * @remarks Requires EFER.
4296 * @remarks No-long-jump zone!!!
4297 */
4298static int hmR0VmxExportGuestEntryExitCtls(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4299{
4300 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
4301 {
4302 PVM pVM = pVCpu->CTX_SUFF(pVM);
4303 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4304
4305 /*
4306 * VM-entry controls.
4307 */
4308 {
4309 uint32_t fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
4310 uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
4311
4312 /*
4313 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
4314 * The first VT-x capable CPUs only supported the 1-setting of this bit.
4315 *
4316 * For nested-guests, this is a mandatory VM-entry control. It's also
4317 * required because we do not want to leak host bits to the nested-guest.
4318 */
4319 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
4320
4321 /*
4322 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
4323 *
4324 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
4325 * required to get the nested-guest working with hardware-assisted VMX execution.
4326 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
4327 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
4328 * here rather than while merging the guest VMCS controls.
4329 */
4330 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
4331 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
4332 else
4333 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
4334
4335 /*
4336 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
4337 *
4338 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
4339 * regardless of whether the nested-guest VMCS specifies it because we are free to
4340 * load whatever MSRs we require and we do not need to modify the guest visible copy
4341 * of the VM-entry MSR load area.
4342 */
4343 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
4344 && hmR0VmxShouldSwapEferMsr(pVCpu))
4345 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
4346 else
4347 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
4348
4349 /*
4350 * The following should -not- be set (since we're not in SMM mode):
4351 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
4352 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
4353 */
4354
4355 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
4356 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
4357
4358 if ((fVal & fZap) == fVal)
4359 { /* likely */ }
4360 else
4361 {
4362 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
4363 pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0, fVal, fZap));
4364 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
4365 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
4366 }
4367
4368 /* Commit it to the VMCS. */
4369 if (pVmcsInfo->u32EntryCtls != fVal)
4370 {
4371 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
4372 AssertRCReturn(rc, rc);
4373 pVmcsInfo->u32EntryCtls = fVal;
4374 }
4375 }
4376
4377 /*
4378 * VM-exit controls.
4379 */
4380 {
4381 uint32_t fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
4382 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
4383
4384 /*
4385 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
4386 * supported the 1-setting of this bit.
4387 *
4388 * For nested-guests, we set the "save debug controls" as the converse
4389 * "load debug controls" is mandatory for nested-guests anyway.
4390 */
4391 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
4392
4393 /*
4394 * Set the host long mode active (EFER.LMA) bit (which Intel calls
4395 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
4396 * host EFER.LMA and EFER.LME bit to this value. See assertion in
4397 * hmR0VmxExportHostMsrs().
4398 *
4399 * For nested-guests, we always set this bit as we do not support 32-bit
4400 * hosts.
4401 */
4402#if HC_ARCH_BITS == 64
4403 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
4404#else
4405 Assert(!pVmxTransient->fIsNestedGuest);
4406 Assert( pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64
4407 || pVmcsInfo->pfnStartVM == VMXR0StartVM32);
4408 /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */
4409 if (pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64)
4410 {
4411 /* The switcher returns to long mode, the EFER MSR is managed by the switcher. */
4412 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
4413 }
4414 else
4415 Assert(!(fVal & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE));
4416#endif
4417
4418 /*
4419 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
4420 *
4421 * For nested-guests, we should use the "save IA32_EFER" control if we also
4422 * used the "load IA32_EFER" control while exporting VM-entry controls.
4423 */
4424 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
4425 && hmR0VmxShouldSwapEferMsr(pVCpu))
4426 {
4427 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
4428 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
4429 }
4430
4431 /*
4432 * Enable saving of the VMX-preemption timer value on VM-exit.
4433 * For nested-guests, currently not exposed/used.
4434 */
4435 if ( pVM->hm.s.vmx.fUsePreemptTimer
4436 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
4437 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
4438
4439 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
4440 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
4441
4442 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
4443 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
4444 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
4445
4446 if ((fVal & fZap) == fVal)
4447 { /* likely */ }
4448 else
4449 {
4450 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
4451 pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0, fVal, fZap));
4452 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
4453 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
4454 }
4455
4456 /* Commit it to the VMCS. */
4457 if (pVmcsInfo->u32ExitCtls != fVal)
4458 {
4459 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
4460 AssertRCReturn(rc, rc);
4461 pVmcsInfo->u32ExitCtls = fVal;
4462 }
4463 }
4464
4465 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
4466 }
4467 return VINF_SUCCESS;
4468}
4469
4470
4471/**
4472 * Sets the TPR threshold in the VMCS.
4473 *
4474 * @returns VBox status code.
4475 * @param pVCpu The cross context virtual CPU structure.
4476 * @param pVmcsInfo The VMCS info. object.
4477 * @param u32TprThreshold The TPR threshold (task-priority class only).
4478 */
4479DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
4480{
4481 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
4482 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4483 RT_NOREF2(pVCpu, pVmcsInfo);
4484 return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
4485}
4486
4487
4488/**
4489 * Exports the guest APIC TPR state into the VMCS.
4490 *
4491 * @returns VBox status code.
4492 * @param pVCpu The cross context virtual CPU structure.
4493 * @param pVmxTransient The VMX-transient structure.
4494 *
4495 * @remarks No-long-jump zone!!!
4496 */
4497static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4498{
4499 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
4500 {
4501 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
4502
4503 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4504 if (!pVmxTransient->fIsNestedGuest)
4505 {
4506 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
4507 && APICIsEnabled(pVCpu))
4508 {
4509 /*
4510 * Setup TPR shadowing.
4511 */
4512 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4513 {
4514 bool fPendingIntr = false;
4515 uint8_t u8Tpr = 0;
4516 uint8_t u8PendingIntr = 0;
4517 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
4518 AssertRCReturn(rc, rc);
4519
4520 /*
4521 * If there are interrupts pending but masked by the TPR, instruct VT-x to
4522 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
4523 * priority of the pending interrupt so we can deliver the interrupt. If there
4524 * are no interrupts pending, set threshold to 0 to not cause any
4525 * TPR-below-threshold VM-exits.
4526 */
4527 Assert(pVmcsInfo->pbVirtApic);
4528 pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
4529 uint32_t u32TprThreshold = 0;
4530 if (fPendingIntr)
4531 {
4532 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
4533 (which is the Task-Priority Class). */
4534 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
4535 const uint8_t u8TprPriority = u8Tpr >> 4;
4536 if (u8PendingPriority <= u8TprPriority)
4537 u32TprThreshold = u8PendingPriority;
4538 }
4539
4540 rc = hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
4541 AssertRCReturn(rc, rc);
4542 }
4543 }
4544 }
4545 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
4546 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
4547 }
4548 return VINF_SUCCESS;
4549}
4550
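/*
 * Illustrative sketch (not part of the build) of the TPR-threshold computation above,
 * with a worked example. The helper name and parameters are hypothetical; only bits 7:4
 * (the task-priority class) of the TPR and of the highest-priority pending interrupt
 * vector are compared.
 */
#if 0
static uint32_t vmxSketchTprThreshold(uint8_t u8Tpr, bool fPendingIntr, uint8_t u8PendingIntr)
{
    uint32_t u32TprThreshold = 0;
    if (fPendingIntr)
    {
        uint8_t const u8PendingPriority = u8PendingIntr >> 4;  /* e.g. vector 0x51 -> class 5. */
        uint8_t const u8TprPriority     = u8Tpr >> 4;          /* e.g. TPR 0x62    -> class 6. */
        if (u8PendingPriority <= u8TprPriority)                /* 5 <= 6: interrupt masked by the TPR, */
            u32TprThreshold = u8PendingPriority;               /* VM-exit once the guest TPR drops below class 5. */
    }
    return u32TprThreshold;                                    /* 0 => no TPR-below-threshold VM-exits. */
}
#endif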
4551
4552/**
4553 * Gets the guest interruptibility-state.
4554 *
4555 * @returns Guest's interruptibility-state.
4556 * @param pVCpu The cross context virtual CPU structure.
4557 * @param pVmcsInfo The VMCS info. object.
4558 *
4559 * @remarks No-long-jump zone!!!
4560 */
4561static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
4562{
4563 /*
4564 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
4565 */
4566 uint32_t fIntrState = 0;
4567 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4568 {
4569 /* If inhibition is active, RIP and RFLAGS should've been updated
4570 (i.e. read previously from the VMCS or from ring-3). */
4571 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4572#ifdef VBOX_STRICT
4573 uint64_t const fExtrn = ASMAtomicUoReadU64(&pCtx->fExtrn);
4574 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4575 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
4576#endif
4577 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
4578 {
4579 if (pCtx->eflags.Bits.u1IF)
4580 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4581 else
4582 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
4583 }
4584 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4585 {
4586 /*
4587 * We can clear the inhibit force flag as even if we go back to the recompiler
4588 * without executing guest code in VT-x, the flag's condition to be cleared is
4589 * met and thus the cleared state is correct.
4590 */
4591 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4592 }
4593 }
4594
4595 /*
4596 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
4597 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
4598 * setting this would block host-NMIs and IRET will not clear the blocking.
4599 *
4600 * We always set NMI-exiting so when the host receives an NMI we get a VM-exit.
4601 *
4602 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
4603 */
4604 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
4605 && CPUMIsGuestNmiBlocking(pVCpu))
4606 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
4607
4608 return fIntrState;
4609}
4610
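/*
 * Illustrative sketch (not part of the build) of the interruptibility-state derivation
 * above. The helper and its boolean parameters are hypothetical; it merely restates the
 * mapping: an interrupt shadow at the inhibited RIP yields BLOCK_STI when EFLAGS.IF is set
 * (STI shadow) and BLOCK_MOVSS otherwise, and guest NMI blocking maps to BLOCK_NMI when
 * virtual NMIs are in use.
 */
#if 0
static uint32_t vmxSketchIntrState(bool fInhibitAtRip, bool fEflIf, bool fNmiBlocking, bool fVirtNmi)
{
    uint32_t fIntrState = 0;
    if (fInhibitAtRip)
        fIntrState = fEflIf ? VMX_VMCS_GUEST_INT_STATE_BLOCK_STI : VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
    if (fVirtNmi && fNmiBlocking)
        fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    return fIntrState;
}
#endif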
4611
4612/**
4613 * Exports the exception intercepts required for guest execution in the VMCS.
4614 *
4615 * @returns VBox status code.
4616 * @param pVCpu The cross context virtual CPU structure.
4617 * @param pVmxTransient The VMX-transient structure.
4618 *
4619 * @remarks No-long-jump zone!!!
4620 */
4621static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4622{
4623 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
4624 {
4625 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
4626 if ( !pVmxTransient->fIsNestedGuest
4627 && pVCpu->hm.s.fGIMTrapXcptUD)
4628 hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_UD);
4629 else
4630 hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
4631
4632 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
4633 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
4634 }
4635 return VINF_SUCCESS;
4636}
4637
4638
4639/**
4640 * Exports the guest's RIP into the guest-state area in the VMCS.
4641 *
4642 * @returns VBox status code.
4643 * @param pVCpu The cross context virtual CPU structure.
4644 *
4645 * @remarks No-long-jump zone!!!
4646 */
4647static int hmR0VmxExportGuestRip(PVMCPU pVCpu)
4648{
4649 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
4650 {
4651 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
4652
4653 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
4654 AssertRCReturn(rc, rc);
4655
4656 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
4657 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
4658 }
4659 return VINF_SUCCESS;
4660}
4661
4662
4663/**
4664 * Exports the guest's RSP into the guest-state area in the VMCS.
4665 *
4666 * @returns VBox status code.
4667 * @param pVCpu The cross context virtual CPU structure.
4668 *
4669 * @remarks No-long-jump zone!!!
4670 */
4671static int hmR0VmxExportGuestRsp(PVMCPU pVCpu)
4672{
4673 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
4674 {
4675 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
4676
4677 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
4678 AssertRCReturn(rc, rc);
4679
4680 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
4681 }
4682 return VINF_SUCCESS;
4683}
4684
4685
4686/**
4687 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
4688 *
4689 * @returns VBox status code.
4690 * @param pVCpu The cross context virtual CPU structure.
4691 * @param pVmxTransient The VMX-transient structure.
4692 *
4693 * @remarks No-long-jump zone!!!
4694 */
4695static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4696{
4697 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
4698 {
4699 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4700
4701 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
4702 Let us assert it as such and use 32-bit VMWRITE. */
4703 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
4704 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
4705 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
4706 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
4707
4708 /*
4709 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
4710 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
4711 * can run the real-mode guest code under Virtual 8086 mode.
4712 */
4713 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4714 if (pVmcsInfo->RealMode.fRealOnV86Active)
4715 {
4716 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4717 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4718 Assert(!pVmxTransient->fIsNestedGuest);
4719 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
4720 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
4721 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
4722 }
4723
4724 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
4725 AssertRCReturn(rc, rc);
4726
4727 /*
4728 * Setup pending debug exceptions if the guest is single-stepping using EFLAGS.TF.
4729 *
4730 * We must avoid setting any automatic debug exceptions delivery when single-stepping
4731 * through the hypervisor debugger using EFLAGS.TF.
4732 */
4733 if ( !pVmxTransient->fIsNestedGuest
4734 && !pVCpu->hm.s.fSingleInstruction
4735 && fEFlags.Bits.u1TF)
4736 {
4737            /** @todo r=ramshankar: Warning!! We ASSUME EFLAGS.TF will not be cleared on
4738             *        premature trips to ring-3, especially since IEM does not yet handle it. */
4739 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
4740 AssertRCReturn(rc, rc);
4741 }
4742 /** @todo NSTVMX: Handling copying of VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS from
4743 * nested-guest VMCS. */
4744
4745 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
4746 Log4Func(("EFlags=%#RX32\n", fEFlags.u32));
4747 }
4748 return VINF_SUCCESS;
4749}
4750
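/*
 * Illustrative sketch (not part of the build) of the eflags adjustment done above for
 * real-on-v86 mode: the original value is stashed so it can be restored on VM-exit, then
 * EFLAGS.VM is forced on and IOPL forced to 0 so privileged real-mode instructions fault
 * and can be emulated. The helper name and parameters are hypothetical.
 */
#if 0
static uint32_t vmxSketchRealOnV86Eflags(uint32_t fEFlags, uint32_t *pfSavedEflags)
{
    *pfSavedEflags = fEFlags;               /* Saved; restored when we leave guest context. */
    fEFlags |= X86_EFL_VM;                  /* Run the real-mode code under virtual-8086 mode. */
    fEFlags &= ~(uint32_t)X86_EFL_IOPL;     /* IOPL=0 so IN/OUT, CLI/STI etc. cause faults we can intercept. */
    return fEFlags;
}
#endif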
4751
4752/**
4753 * Exports the guest CR0 control register into the guest-state area in the VMCS.
4754 *
4755 * The guest FPU state is always pre-loaded hence we don't need to bother about
4756 * sharing FPU related CR0 bits between the guest and host.
4757 *
4758 * @returns VBox status code.
4759 * @param pVCpu The cross context virtual CPU structure.
4760 * @param pVmxTransient The VMX-transient structure.
4761 *
4762 * @remarks No-long-jump zone!!!
4763 */
4764static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4765{
4766 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
4767 {
4768 PVM pVM = pVCpu->CTX_SUFF(pVM);
4769 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4770
4771 /*
4772 * Figure out fixed CR0 bits in VMX operation.
4773 */
4774 uint64_t fSetCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
4775 uint64_t const fZapCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
4776 if (pVM->hm.s.vmx.fUnrestrictedGuest)
4777 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4778 else
4779 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
4780
4781 if (!pVmxTransient->fIsNestedGuest)
4782 {
4783 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
4784 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
4785 uint64_t const u64ShadowCr0 = u64GuestCr0;
4786 Assert(!RT_HI_U32(u64GuestCr0));
4787
4788 /*
4789 * Setup VT-x's view of the guest CR0.
4790 */
4791 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
4792 if (pVM->hm.s.fNestedPaging)
4793 {
4794 if (CPUMIsGuestPagingEnabled(pVCpu))
4795 {
4796 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
4797 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
4798 | VMX_PROC_CTLS_CR3_STORE_EXIT);
4799 }
4800 else
4801 {
4802 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
4803 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
4804 | VMX_PROC_CTLS_CR3_STORE_EXIT;
4805 }
4806
4807 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
4808 if (pVM->hm.s.vmx.fUnrestrictedGuest)
4809 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
4810 }
4811 else
4812 {
4813 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
4814 u64GuestCr0 |= X86_CR0_WP;
4815 }
4816
4817 /*
4818 * Guest FPU bits.
4819 *
4820 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state
4821 * using CR0.TS.
4822 *
4823             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
4824             * set on the first CPUs to support VT-x; there is no mention of it with regards to unrestricted-guest (UX) execution in the VM-entry checks.
4825 */
4826 u64GuestCr0 |= X86_CR0_NE;
4827
4828 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
4829 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
4830
4831 /*
4832 * Update exception intercepts.
4833 */
4834 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
4835 if (pVmcsInfo->RealMode.fRealOnV86Active)
4836 {
4837 Assert(PDMVmmDevHeapIsEnabled(pVM));
4838 Assert(pVM->hm.s.vmx.pRealModeTSS);
4839 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
4840 }
4841 else
4842 {
4843 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
4844 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
4845 if (fInterceptMF)
4846 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
4847 }
4848
4849 /* Additional intercepts for debugging, define these yourself explicitly. */
4850#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
4851 uXcptBitmap |= 0
4852 | RT_BIT(X86_XCPT_BP)
4853 | RT_BIT(X86_XCPT_DE)
4854 | RT_BIT(X86_XCPT_NM)
4855 | RT_BIT(X86_XCPT_TS)
4856 | RT_BIT(X86_XCPT_UD)
4857 | RT_BIT(X86_XCPT_NP)
4858 | RT_BIT(X86_XCPT_SS)
4859 | RT_BIT(X86_XCPT_GP)
4860 | RT_BIT(X86_XCPT_PF)
4861 | RT_BIT(X86_XCPT_MF)
4862 ;
4863#elif defined(HMVMX_ALWAYS_TRAP_PF)
4864 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
4865#endif
4866 if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
4867 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
4868 Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
4869
4870 /* Apply the fixed CR0 bits and enable caching. */
4871 u64GuestCr0 |= fSetCr0;
4872 u64GuestCr0 &= fZapCr0;
4873 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
4874
4875 /* Commit the CR0 and related fields to the guest VMCS. */
4876 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u64GuestCr0); /** @todo Fix to 64-bit when we drop 32-bit. */
4877 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);
4878 if (uProcCtls != pVmcsInfo->u32ProcCtls)
4879 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
4880 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
4881 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
4882 AssertRCReturn(rc, rc);
4883
4884 /* Update our caches. */
4885 pVmcsInfo->u32ProcCtls = uProcCtls;
4886 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
4887
4888 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
4889 }
4890 else
4891 {
4892 /*
4893 * With nested-guests, we may have extended the guest/host mask here (since we
4894 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
4895 * mask can include more bits (to read from the nested-guest CR0 read-shadow) than
4896 * the guest hypervisor originally supplied. Thus, we should, in essence, copy
4897 * those bits from the nested-guest CR0 into the nested-guest CR0 read shadow.
4898 */
4899 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
4900 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
4901 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(pVCpu, &pVCpu->cpum.GstCtx);
4902 Assert(!RT_HI_U32(u64GuestCr0));
4903 Assert(u64GuestCr0 & X86_CR0_NE);
4904
4905 /* Apply the fixed CR0 bits and enable caching. */
4906 u64GuestCr0 |= fSetCr0;
4907 u64GuestCr0 &= fZapCr0;
4908 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
4909
4910 /* Commit the CR0 and CR0 read shadow to the nested-guest VMCS. */
4911 /** @todo NSTVMX: Fix to 64-bit when we drop 32-bit. */
4912 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u64GuestCr0);
4913 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);
4914 AssertRCReturn(rc, rc);
4915
4916 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
4917 }
4918
4919 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
4920 }
4921
4922 return VINF_SUCCESS;
4923}
4924
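/*
 * Illustrative sketch (not part of the build) of how the fixed CR0 bits derived from the
 * IA32_VMX_CR0_FIXED0/FIXED1 MSRs are applied above: bits set in both MSRs must be 1, bits
 * clear in both must be 0, and CD/NW are additionally cleared to keep caching enabled. The
 * helper name and parameters are hypothetical.
 */
#if 0
static uint64_t vmxSketchApplyCr0Fixed(uint64_t uGuestCr0, uint64_t uFixed0, uint64_t uFixed1)
{
    uint64_t const fSetCr0 = uFixed0 & uFixed1;   /* Must-be-one bits. */
    uint64_t const fZapCr0 = uFixed0 | uFixed1;   /* Bits that may be one. */
    uGuestCr0 |= fSetCr0;
    uGuestCr0 &= fZapCr0;
    uGuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
    return uGuestCr0;
}
#endif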
4925
4926/**
4927 * Exports the guest control registers (CR3, CR4) into the guest-state area
4928 * in the VMCS.
4929 *
4930 * @returns VBox strict status code.
4931 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
4932 * without unrestricted guest access and the VMMDev is not presently
4933 * mapped (e.g. EFI32).
4934 *
4935 * @param pVCpu The cross context virtual CPU structure.
4936 * @param pVmxTransient The VMX-transient structure.
4937 *
4938 * @remarks No-long-jump zone!!!
4939 */
4940static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4941{
4942 int rc = VINF_SUCCESS;
4943 PVM pVM = pVCpu->CTX_SUFF(pVM);
4944
4945 /*
4946 * Guest CR2.
4947 * It's always loaded in the assembler code. Nothing to do here.
4948 */
4949
4950 /*
4951 * Guest CR3.
4952 */
4953 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
4954 {
4955 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
4956
4957 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
4958 if (pVM->hm.s.fNestedPaging)
4959 {
4960 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4961 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
4962
4963 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
4964 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
4965 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
4966 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
4967
4968 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
4969 pVmcsInfo->HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
4970 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
4971
4972 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
4973 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
4974 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
4975 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
4976 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
4977 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
4978 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
4979
4980 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
4981 AssertRCReturn(rc, rc);
4982
4983 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4984 if ( pVM->hm.s.vmx.fUnrestrictedGuest
4985 || CPUMIsGuestPagingEnabledEx(pCtx))
4986 {
4987 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
4988 if (CPUMIsGuestInPAEModeEx(pCtx))
4989 {
4990 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
4991 AssertRCReturn(rc, rc);
4992 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
4993 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
4994 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
4995 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
4996 AssertRCReturn(rc, rc);
4997 }
4998
4999 /*
5000                 * With nested paging, the guest's view of its CR3 is left unblemished when the
5001                 * guest is using paging, or when we have unrestricted guest execution to handle
5002                 * the guest while it's not using paging.
5003 */
5004 GCPhysGuestCR3 = pCtx->cr3;
5005 }
5006 else
5007 {
5008 /*
5009 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
5010 * thinks it accesses physical memory directly, we use our identity-mapped
5011 * page table to map guest-linear to guest-physical addresses. EPT takes care
5012 * of translating it to host-physical addresses.
5013 */
5014 RTGCPHYS GCPhys;
5015 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
5016
5017 /* We obtain it here every time as the guest could have relocated this PCI region. */
5018 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
5019 if (RT_SUCCESS(rc))
5020 { /* likely */ }
5021 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
5022 {
5023 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
5024 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
5025 }
5026 else
5027 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
5028
5029 GCPhysGuestCR3 = GCPhys;
5030 }
5031
5032 Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCR3));
5033 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
5034 AssertRCReturn(rc, rc);
5035 }
5036 else
5037 {
5038 /* Non-nested paging case, just use the hypervisor's CR3. */
5039 RTHCPHYS const HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
5040
5041 Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
5042 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
5043 AssertRCReturn(rc, rc);
5044 }
5045
5046 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
5047 }
5048
5049 /*
5050 * Guest CR4.
5051 * ASSUMES this is done everytime we get in from ring-3! (XCR0)
5052 */
5053 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
5054 {
5055 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5056 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5057
5058 /*
5059 * Figure out fixed CR4 bits in VMX operation.
5060 */
5061 uint64_t const fSetCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
5062 uint64_t const fZapCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
5063
5064 /*
5065 * With nested-guests, we may have extended the guest/host mask here (since we
5066 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
5067 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
5068 * the guest hypervisor originally supplied. Thus, we should, in essence, copy
5069 * those bits from the nested-guest CR4 into the nested-guest CR4 read shadow.
5070 */
5071 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5072 uint64_t u64GuestCr4 = pCtx->cr4;
5073 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
5074 ? pCtx->cr4
5075 : CPUMGetGuestVmxMaskedCr4(pVCpu, pCtx);
5076 Assert(!RT_HI_U32(u64GuestCr4));
5077
5078 /*
5079 * Setup VT-x's view of the guest CR4.
5080 *
5081 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
5082 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
5083 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
5084 *
5085 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
5086 */
5087 if (pVmcsInfo->RealMode.fRealOnV86Active)
5088 {
5089 Assert(pVM->hm.s.vmx.pRealModeTSS);
5090 Assert(PDMVmmDevHeapIsEnabled(pVM));
5091 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
5092 }
5093
5094 if (pVM->hm.s.fNestedPaging)
5095 {
5096 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
5097 && !pVM->hm.s.vmx.fUnrestrictedGuest)
5098 {
5099 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
5100 u64GuestCr4 |= X86_CR4_PSE;
5101 /* Our identity mapping is a 32-bit page directory. */
5102 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
5103 }
5104 /* else use guest CR4.*/
5105 }
5106 else
5107 {
5108 Assert(!pVmxTransient->fIsNestedGuest);
5109
5110 /*
5111             * The shadow paging modes and guest paging modes are different; the shadow is in accordance with the host
5112             * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
5113 */
5114 switch (pVCpu->hm.s.enmShadowMode)
5115 {
5116 case PGMMODE_REAL: /* Real-mode. */
5117 case PGMMODE_PROTECTED: /* Protected mode without paging. */
5118 case PGMMODE_32_BIT: /* 32-bit paging. */
5119 {
5120 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
5121 break;
5122 }
5123
5124 case PGMMODE_PAE: /* PAE paging. */
5125 case PGMMODE_PAE_NX: /* PAE paging with NX. */
5126 {
5127 u64GuestCr4 |= X86_CR4_PAE;
5128 break;
5129 }
5130
5131 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
5132 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
5133#ifdef VBOX_ENABLE_64_BITS_GUESTS
5134 break;
5135#endif
5136 default:
5137 AssertFailed();
5138 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
5139 }
5140 }
5141
5142 /* Apply the fixed CR4 bits (mainly CR4.VMXE). */
5143 u64GuestCr4 |= fSetCr4;
5144 u64GuestCr4 &= fZapCr4;
5145
5146 /* Commit the CR4 and CR4 read shadow to the guest VMCS. */
5147 /** @todo Fix to 64-bit when we drop 32-bit. */
5148 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u64GuestCr4);
5149 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4);
5150 AssertRCReturn(rc, rc);
5151
5152 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
5153 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
5154
5155 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
5156
5157 Log4Func(("cr4=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
5158 }
5159 return rc;
5160}
5161
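/*
 * Illustrative sketch (not part of the build) of the EPTP composition performed above: the
 * 4K-aligned host-physical address of the EPT root table is combined with the write-back
 * memory type (bits 2:0) and the page-walk length minus one (bits 5:3). The helper name is
 * hypothetical.
 */
#if 0
static uint64_t vmxSketchMakeEptp(uint64_t HCPhysEptRoot)
{
    uint64_t uEptp = HCPhysEptRoot & ~(uint64_t)0xfff;                            /* Must be 4K aligned. */
    uEptp |= VMX_EPT_MEMTYPE_WB;                                                  /* Memory type: write-back. */
    uEptp |= VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT;  /* 4-level walk => value 3. */
    return uEptp;
}
#endif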
5162
5163/**
5164 * Exports the guest debug registers into the guest-state area in the VMCS.
5165 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
5166 *
5167 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
5168 *
5169 * @returns VBox status code.
5170 * @param pVCpu The cross context virtual CPU structure.
5171 * @param pVmxTransient The VMX-transient structure.
5172 *
5173 * @remarks No-long-jump zone!!!
5174 */
5175static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5176{
5177 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5178
5179 /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction
5180 * stepping. */
5181 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5182 if (pVmxTransient->fIsNestedGuest)
5183 {
5184 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu));
5185 AssertRCReturn(rc, rc);
5186 return VINF_SUCCESS;
5187 }
5188
5189#ifdef VBOX_STRICT
5190 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
5191 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5192 {
5193 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
5194 Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
5195 Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
5196 }
5197#endif
5198
5199 bool fSteppingDB = false;
5200 bool fInterceptMovDRx = false;
5201 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
5202 if (pVCpu->hm.s.fSingleInstruction)
5203 {
5204 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
5205 PVM pVM = pVCpu->CTX_SUFF(pVM);
5206 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
5207 {
5208 uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
5209 Assert(fSteppingDB == false);
5210 }
5211 else
5212 {
5213 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
5214 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
5215 pVCpu->hm.s.fClearTrapFlag = true;
5216 fSteppingDB = true;
5217 }
5218 }
5219
5220 uint32_t u32GuestDr7;
5221 if ( fSteppingDB
5222 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
5223 {
5224 /*
5225 * Use the combined guest and host DRx values found in the hypervisor register set
5226 * because the hypervisor debugger has breakpoints active or someone is single stepping
5227 * on the host side without a monitor trap flag.
5228 *
5229 * Note! DBGF expects a clean DR6 state before executing guest code.
5230 */
5231#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
5232 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5233 && !CPUMIsHyperDebugStateActivePending(pVCpu))
5234 {
5235 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
5236 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
5237 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
5238 }
5239 else
5240#endif
5241 if (!CPUMIsHyperDebugStateActive(pVCpu))
5242 {
5243 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
5244 Assert(CPUMIsHyperDebugStateActive(pVCpu));
5245 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5246 }
5247
5248 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
5249 u32GuestDr7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
5250 pVCpu->hm.s.fUsingHyperDR7 = true;
5251 fInterceptMovDRx = true;
5252 }
5253 else
5254 {
5255 /*
5256 * If the guest has enabled debug registers, we need to load them prior to
5257 * executing guest code so they'll trigger at the right time.
5258 */
5259 if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
5260 {
5261#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
5262 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5263 && !CPUMIsGuestDebugStateActivePending(pVCpu))
5264 {
5265 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
5266 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
5267 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
5268 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
5269 }
5270 else
5271#endif
5272 if (!CPUMIsGuestDebugStateActive(pVCpu))
5273 {
5274 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
5275 Assert(CPUMIsGuestDebugStateActive(pVCpu));
5276 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5277 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
5278 }
5279 Assert(!fInterceptMovDRx);
5280 }
5281 /*
5282         * If no debugging is enabled, we'll lazy-load DR0-3. Unlike on AMD-V, we
5283 * must intercept #DB in order to maintain a correct DR6 guest value, and
5284 * because we need to intercept it to prevent nested #DBs from hanging the
5285 * CPU, we end up always having to intercept it. See hmR0VmxSetupVmcsXcptBitmap().
5286 */
5287#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
5288 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
5289 && !CPUMIsGuestDebugStateActive(pVCpu))
5290#else
5291 else if (!CPUMIsGuestDebugStateActive(pVCpu))
5292#endif
5293 {
5294 fInterceptMovDRx = true;
5295 }
5296
5297 /* Update DR7 with the actual guest value. */
5298 u32GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
5299 pVCpu->hm.s.fUsingHyperDR7 = false;
5300 }
5301
5302 if (fInterceptMovDRx)
5303 uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
5304 else
5305 uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
5306
5307 /*
5308 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
5309 * monitor-trap flag and update our cache.
5310 */
5311 if (uProcCtls != pVmcsInfo->u32ProcCtls)
5312 {
5313 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5314 AssertRCReturn(rc2, rc2);
5315 pVmcsInfo->u32ProcCtls = uProcCtls;
5316 }
5317
5318 /*
5319 * Update guest DR7.
5320 */
5321 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
5322 AssertRCReturn(rc, rc);
5323
5324 /*
5325 * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
5326 * we need to clear interrupt inhibition if any as otherwise it causes a VM-entry failure.
5327 *
5328 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5329 */
5330 if (fSteppingDB)
5331 {
5332 Assert(pVCpu->hm.s.fSingleInstruction);
5333 Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
5334
5335 uint32_t fIntrState = 0;
5336 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
5337 AssertRCReturn(rc, rc);
5338
5339 if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5340 {
5341 fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5342 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5343 AssertRCReturn(rc, rc);
5344 }
5345 }
5346
5347 return VINF_SUCCESS;
5348}
5349
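/*
 * Illustrative sketch (not part of the build) of the MOV-DRx intercept policy implemented
 * above. The helper and its parameters are hypothetical: DRx accesses are intercepted
 * whenever the hypervisor's DR7 is in use (debugger breakpoints, or single-stepping without
 * the monitor-trap flag), otherwise only while the guest's debug state is not loaded on the
 * CPU, which keeps DR6 correct and lets us lazy-load DR0-3 on first use.
 */
#if 0
static bool vmxSketchInterceptMovDRx(bool fSteppingDB, uint32_t uHyperDr7, bool fGuestDebugStateActive)
{
    if (fSteppingDB || (uHyperDr7 & X86_DR7_ENABLED_MASK))
        return true;                    /* Hypervisor DRx values are on the CPU; hide them from the guest. */
    return !fGuestDebugStateActive;     /* Lazy-load case: trap until the guest debug state is loaded. */
}
#endif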
5350
5351#ifdef VBOX_STRICT
5352/**
5353 * Strict function to validate segment registers.
5354 *
5355 * @param pVCpu The cross context virtual CPU structure.
5356 * @param pVmcsInfo The VMCS info. object.
5357 *
5358 * @remarks Will import guest CR0 on strict builds during validation of
5359 * segments.
5360 */
5361static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
5362{
5363 /*
5364 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
5365 *
5366 * The reason we check for attribute value 0 in this function and not just the unusable bit is
5367 * because hmR0VmxExportGuestSegReg() only updates the VMCS' copy of the value with the
5368 * unusable bit and doesn't change the guest-context value.
5369 */
5370 PVM pVM = pVCpu->CTX_SUFF(pVM);
5371 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5372 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
5373 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
5374 && ( !CPUMIsGuestInRealModeEx(pCtx)
5375 && !CPUMIsGuestInV86ModeEx(pCtx)))
5376 {
5377 /* Protected mode checks */
5378 /* CS */
5379 Assert(pCtx->cs.Attr.n.u1Present);
5380 Assert(!(pCtx->cs.Attr.u & 0xf00));
5381 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
5382 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5383 || !(pCtx->cs.Attr.n.u1Granularity));
5384 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
5385 || (pCtx->cs.Attr.n.u1Granularity));
5386 /* CS cannot be loaded with NULL in protected mode. */
5387 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
5388 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5389 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
5390 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5391 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
5392 else
5393            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
5394 /* SS */
5395 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
5396 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
5397 if ( !(pCtx->cr0 & X86_CR0_PE)
5398 || pCtx->cs.Attr.n.u4Type == 3)
5399 {
5400 Assert(!pCtx->ss.Attr.n.u2Dpl);
5401 }
5402 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5403 {
5404 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
5405 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
5406 Assert(pCtx->ss.Attr.n.u1Present);
5407 Assert(!(pCtx->ss.Attr.u & 0xf00));
5408 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
5409 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5410 || !(pCtx->ss.Attr.n.u1Granularity));
5411 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
5412 || (pCtx->ss.Attr.n.u1Granularity));
5413 }
5414 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegReg(). */
5415 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5416 {
5417 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5418 Assert(pCtx->ds.Attr.n.u1Present);
5419 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
5420 Assert(!(pCtx->ds.Attr.u & 0xf00));
5421 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
5422 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5423 || !(pCtx->ds.Attr.n.u1Granularity));
5424 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
5425 || (pCtx->ds.Attr.n.u1Granularity));
5426 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5427 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
5428 }
5429 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5430 {
5431 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5432 Assert(pCtx->es.Attr.n.u1Present);
5433 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
5434 Assert(!(pCtx->es.Attr.u & 0xf00));
5435 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
5436 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
5437 || !(pCtx->es.Attr.n.u1Granularity));
5438 Assert( !(pCtx->es.u32Limit & 0xfff00000)
5439 || (pCtx->es.Attr.n.u1Granularity));
5440 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5441 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
5442 }
5443 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5444 {
5445 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5446 Assert(pCtx->fs.Attr.n.u1Present);
5447 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
5448 Assert(!(pCtx->fs.Attr.u & 0xf00));
5449 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
5450 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5451 || !(pCtx->fs.Attr.n.u1Granularity));
5452 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
5453 || (pCtx->fs.Attr.n.u1Granularity));
5454 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5455 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
5456 }
5457 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5458 {
5459 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5460 Assert(pCtx->gs.Attr.n.u1Present);
5461 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
5462 Assert(!(pCtx->gs.Attr.u & 0xf00));
5463 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
5464 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5465 || !(pCtx->gs.Attr.n.u1Granularity));
5466 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
5467 || (pCtx->gs.Attr.n.u1Granularity));
5468 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5469 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
5470 }
5471 /* 64-bit capable CPUs. */
5472# if HC_ARCH_BITS == 64
5473 Assert(!RT_HI_U32(pCtx->cs.u64Base));
5474 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
5475 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
5476 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
5477# endif
5478 }
5479 else if ( CPUMIsGuestInV86ModeEx(pCtx)
5480 || ( CPUMIsGuestInRealModeEx(pCtx)
5481 && !pVM->hm.s.vmx.fUnrestrictedGuest))
5482 {
5483 /* Real and v86 mode checks. */
5484        /* hmR0VmxExportGuestSegReg() writes the modified value into the VMCS. We want what we're feeding to VT-x. */
5485 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5486 if (pVmcsInfo->RealMode.fRealOnV86Active)
5487 {
5488 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
5489 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5490 }
5491 else
5492 {
5493 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
5494 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5495 }
5496
5497 /* CS */
5498 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
5499 Assert(pCtx->cs.u32Limit == 0xffff);
5500 Assert(u32CSAttr == 0xf3);
5501 /* SS */
5502 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
5503 Assert(pCtx->ss.u32Limit == 0xffff);
5504 Assert(u32SSAttr == 0xf3);
5505 /* DS */
5506 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
5507 Assert(pCtx->ds.u32Limit == 0xffff);
5508 Assert(u32DSAttr == 0xf3);
5509 /* ES */
5510 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
5511 Assert(pCtx->es.u32Limit == 0xffff);
5512 Assert(u32ESAttr == 0xf3);
5513 /* FS */
5514 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
5515 Assert(pCtx->fs.u32Limit == 0xffff);
5516 Assert(u32FSAttr == 0xf3);
5517 /* GS */
5518 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
5519 Assert(pCtx->gs.u32Limit == 0xffff);
5520 Assert(u32GSAttr == 0xf3);
5521 /* 64-bit capable CPUs. */
5522# if HC_ARCH_BITS == 64
5523 Assert(!RT_HI_U32(pCtx->cs.u64Base));
5524 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
5525 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
5526 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
5527# endif
5528 }
5529}
5530#endif /* VBOX_STRICT */
5531
5532
5533/**
5534 * Exports a guest segment register into the guest-state area in the VMCS.
5535 *
5536 * @returns VBox status code.
5537 * @param pVCpu The cross context virtual CPU structure.
5538 * @param pVmcsInfo The VMCS info. object.
5539 * @param iSegReg The segment register number (X86_SREG_XXX).
5540 * @param pSelReg Pointer to the segment selector.
5541 *
5542 * @remarks No-long-jump zone!!!
5543 */
5544static int hmR0VmxExportGuestSegReg(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t iSegReg, PCCPUMSELREG pSelReg)
5545{
5546 Assert(iSegReg < X86_SREG_COUNT);
5547 uint32_t const idxSel = g_aVmcsSegSel[iSegReg];
5548 uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg];
5549 uint32_t const idxBase = g_aVmcsSegBase[iSegReg];
5550 uint32_t const idxAttr = g_aVmcsSegAttr[iSegReg];
5551
5552 uint32_t u32Access = pSelReg->Attr.u;
5553 if (pVmcsInfo->RealMode.fRealOnV86Active)
5554 {
5555 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
5556 u32Access = 0xf3;
5557 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5558 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
5559 RT_NOREF_PV(pVCpu);
5560 }
5561 else
5562 {
5563 /*
5564 * The way to differentiate between whether this is really a null selector or was just
5565 * a selector loaded with 0 in real-mode is using the segment attributes. A selector
5566 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
5567 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures
5568         * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
5569         * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
5570         * NULL selectors loaded in protected-mode have their attributes set to 0.
5571 u32Access = X86DESCATTR_UNUSABLE;
5572 }
5573
5574 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
5575 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
5576              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
5577
5578 /*
5579 * Commit it to the VMCS.
5580 */
5581 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel);
5582 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit);
5583 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);
5584 rc |= VMXWriteVmcs32(idxAttr, u32Access);
5585 AssertRCReturn(rc, rc);
5586 return rc;
5587}
5588
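/*
 * Illustrative sketch (not part of the build) of the access-rights selection above: under
 * the real-on-v86 hack every selector gets the fixed 0xf3 attributes, otherwise a zero
 * attribute value (a null selector) is marked unusable for VT-x. The helper name and
 * parameters are hypothetical.
 */
#if 0
static uint32_t vmxSketchSegAttr(bool fRealOnV86, uint32_t u32Attr)
{
    if (fRealOnV86)
        return 0xf3;                            /* Present, DPL=3, read/write, accessed data segment. */
    return u32Attr ? u32Attr : X86DESCATTR_UNUSABLE;
}
#endif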
5589
5590/**
5591 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
5592 * area in the VMCS.
5593 *
5594 * @returns VBox status code.
5595 * @param pVCpu The cross context virtual CPU structure.
5596 * @param pVmxTransient The VMX-transient structure.
5597 *
5598 * @remarks Will import guest CR0 on strict builds during validation of
5599 * segments.
5600 * @remarks No-long-jump zone!!!
5601 */
5602static int hmR0VmxExportGuestSegRegsXdtr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5603{
5604 int rc = VERR_INTERNAL_ERROR_5;
5605 PVM pVM = pVCpu->CTX_SUFF(pVM);
5606 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5607 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5608
5609 /*
5610 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
5611 */
5612 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
5613 {
5614#ifdef VBOX_WITH_REM
5615 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
5616 {
5617 Assert(!pVmxTransient->fIsNestedGuest);
5618 Assert(pVM->hm.s.vmx.pRealModeTSS);
5619 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
5620 if ( pVmcsInfo->fWasInRealMode
5621 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
5622 {
5623 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
5624 in real-mode (e.g. OpenBSD 4.0) */
5625 REMFlushTBs(pVM);
5626 Log4Func(("Switch to protected mode detected!\n"));
5627 pVmcsInfo->fWasInRealMode = false;
5628 }
5629 }
5630#endif
5631 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
5632 {
5633 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
5634 if (pVmcsInfo->RealMode.fRealOnV86Active)
5635 pVmcsInfo->RealMode.AttrCS.u = pCtx->cs.Attr.u;
5636 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
5637 AssertRCReturn(rc, rc);
5638 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
5639 }
5640
5641 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
5642 {
5643 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
5644 if (pVmcsInfo->RealMode.fRealOnV86Active)
5645 pVmcsInfo->RealMode.AttrSS.u = pCtx->ss.Attr.u;
5646 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
5647 AssertRCReturn(rc, rc);
5648 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
5649 }
5650
5651 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
5652 {
5653 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
5654 if (pVmcsInfo->RealMode.fRealOnV86Active)
5655 pVmcsInfo->RealMode.AttrDS.u = pCtx->ds.Attr.u;
5656 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
5657 AssertRCReturn(rc, rc);
5658 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
5659 }
5660
5661 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
5662 {
5663 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
5664 if (pVmcsInfo->RealMode.fRealOnV86Active)
5665 pVmcsInfo->RealMode.AttrES.u = pCtx->es.Attr.u;
5666 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
5667 AssertRCReturn(rc, rc);
5668 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
5669 }
5670
5671 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
5672 {
5673 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
5674 if (pVmcsInfo->RealMode.fRealOnV86Active)
5675 pVmcsInfo->RealMode.AttrFS.u = pCtx->fs.Attr.u;
5676 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
5677 AssertRCReturn(rc, rc);
5678 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
5679 }
5680
5681 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
5682 {
5683 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
5684 if (pVmcsInfo->RealMode.fRealOnV86Active)
5685 pVmcsInfo->RealMode.AttrGS.u = pCtx->gs.Attr.u;
5686 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
5687 AssertRCReturn(rc, rc);
5688 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
5689 }
5690
5691#ifdef VBOX_STRICT
5692 hmR0VmxValidateSegmentRegs(pVCpu, pVmcsInfo);
5693#endif
5694 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
5695 pCtx->cs.Attr.u));
5696 }
5697
5698 /*
5699 * Guest TR.
5700 */
5701 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
5702 {
5703 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
5704
5705 /*
5706 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
5707 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
5708 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
5709 */
5710 uint16_t u16Sel;
5711 uint32_t u32Limit;
5712 uint64_t u64Base;
5713 uint32_t u32AccessRights;
5714 if (!pVmcsInfo->RealMode.fRealOnV86Active)
5715 {
5716 u16Sel = pCtx->tr.Sel;
5717 u32Limit = pCtx->tr.u32Limit;
5718 u64Base = pCtx->tr.u64Base;
5719 u32AccessRights = pCtx->tr.Attr.u;
5720 }
5721 else
5722 {
5723 Assert(!pVmxTransient->fIsNestedGuest);
5724 Assert(pVM->hm.s.vmx.pRealModeTSS);
5725 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
5726
5727 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
5728 RTGCPHYS GCPhys;
5729 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
5730 AssertRCReturn(rc, rc);
5731
5732 X86DESCATTR DescAttr;
5733 DescAttr.u = 0;
5734 DescAttr.n.u1Present = 1;
5735 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
5736
5737 u16Sel = 0;
5738 u32Limit = HM_VTX_TSS_SIZE;
5739 u64Base = GCPhys;
5740 u32AccessRights = DescAttr.u;
5741 }
5742
5743 /* Validate. */
5744 Assert(!(u16Sel & RT_BIT(2)));
5745 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
5746 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
5747 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
5748 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
5749 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
5750 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
5751 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
5752 Assert( (u32Limit & 0xfff) == 0xfff
5753 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
5754 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
5755 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
5756
5757 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_TR_SEL, u16Sel);
5758 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
5759 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
5760 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
5761 AssertRCReturn(rc, rc);
5762
5763 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
5764 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
5765 }
5766
5767 /*
5768 * Guest GDTR.
5769 */
5770 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
5771 {
5772 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
5773
5774 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
5775 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
5776 AssertRCReturn(rc, rc);
5777
5778 /* Validate. */
5779 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
5780
5781 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
5782 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
5783 }
5784
5785 /*
5786 * Guest LDTR.
5787 */
5788 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
5789 {
5790 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
5791
5792 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
5793 uint32_t u32Access;
5794 if ( !pVmxTransient->fIsNestedGuest
5795 && !pCtx->ldtr.Attr.u)
5796 u32Access = X86DESCATTR_UNUSABLE;
5797 else
5798 u32Access = pCtx->ldtr.Attr.u;
5799
5800 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel);
5801 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit);
5802 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
5803 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base);
5804 AssertRCReturn(rc, rc);
5805
5806 /* Validate. */
5807 if (!(u32Access & X86DESCATTR_UNUSABLE))
5808 {
5809 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
5810 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
5811 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
5812 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
5813 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
5814 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
5815 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
5816 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
5817 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
5818 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
5819 }
5820
5821 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
5822 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
5823 }
5824
5825 /*
5826 * Guest IDTR.
5827 */
5828 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
5829 {
5830 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
5831
5832 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
5833 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
5834 AssertRCReturn(rc, rc);
5835
5836 /* Validate. */
5837 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
5838
5839 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
5840 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
5841 }
5842
5843 return VINF_SUCCESS;
5844}
5845
5846
5847/**
5848 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
5849 * areas.
5850 *
5851 * These MSRs will automatically be loaded to the host CPU on every successful
5852 * VM-entry and stored from the host CPU on every successful VM-exit.
5853 *
5854 * We create/update MSR slots for the host MSRs in the VM-exit MSR-load area. The
5855 * actual host MSR values are not updated here for performance reasons. See
5856 * hmR0VmxExportHostMsrs().
5857 *
5858 * We also export the guest sysenter MSRs into the guest-state area in the VMCS.
5859 *
5860 * @returns VBox status code.
5861 * @param pVCpu The cross context virtual CPU structure.
5862 * @param pVmxTransient The VMX-transient structure.
5863 *
5864 * @remarks No-long-jump zone!!!
5865 */
5866static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5867{
5868 AssertPtr(pVCpu);
5869 AssertPtr(pVmxTransient);
5870
5871 PVM pVM = pVCpu->CTX_SUFF(pVM);
5872 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5873
5874 /*
5875 * MSRs that we use the auto-load/store MSR area in the VMCS.
5876 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
5877 * The host MSR values are updated when it's safe in hmR0VmxLazySaveHostMsrs().
5878 *
5879     * For nested-guests, the guest's MSRs from the VM-entry MSR-load area are already
5880 * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction
5881 * emulation, nothing to do here.
5882 */
5883 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
5884 {
5885 if ( !pVmxTransient->fIsNestedGuest
5886 && pVM->hm.s.fAllow64BitGuests)
5887 {
5888#if HC_ARCH_BITS == 32
5889 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
5890 Assert(!pVmxTransient->fIsNestedGuest);
5891
5892 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_LSTAR, pCtx->msrLSTAR, true, false);
5893 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_STAR, pCtx->msrSTAR, true, false);
5894 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_SF_MASK, pCtx->msrSFMASK, true, false);
5895 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, true, false);
5896 AssertRCReturn(rc, rc);
5897#endif
5898 }
5899 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
5900 }
5901
5902 /*
5903 * Guest Sysenter MSRs.
5904 */
5905 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
5906 {
5907 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
5908
5909 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
5910 {
5911 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
5912 AssertRCReturn(rc, rc);
5913 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
5914 }
5915
5916 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
5917 {
5918 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
5919 AssertRCReturn(rc, rc);
5920 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
5921 }
5922
5923 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
5924 {
5925 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
5926 AssertRCReturn(rc, rc);
5927 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
5928 }
5929 }
5930
5931 /*
5932 * Guest/host EFER MSR.
5933 */
5934 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
5935 {
5936 /* Whether we are using the VMCS to swap the EFER MSR must have been
5937 determined earlier while exporting VM-entry/VM-exit controls. */
5938 Assert(!(ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
5939 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
5940
5941 if (hmR0VmxShouldSwapEferMsr(pVCpu))
5942 {
5943 /*
5944 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
5945 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
5946 */
5947 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
5948 {
5949 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
5950 AssertRCReturn(rc, rc);
5951 }
5952 else
5953 {
5954 /*
5955 * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must
5956 * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
5957 */
5958 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, pCtx->msrEFER,
5959 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
5960 AssertRCReturn(rc, rc);
5961 }
5962 }
5963 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
5964 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
5965
5966 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
5967 }
5968
5969 /*
5970 * Other MSRs.
5971 * Speculation Control (R/W).
5972 */
5973 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
5974 {
5975        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
5976 if (pVM->cpum.ro.GuestFeatures.fIbrs)
5977 {
5978 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
5979 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
5980 AssertRCReturn(rc, rc);
5981 }
5982 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
5983 }
5984
5985 return VINF_SUCCESS;
5986}
5987
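/*
 * Illustrative sketch (not part of the build) of the EFER swapping decision made above. The
 * helper, enum and parameters are hypothetical; they only restate the policy: prefer the
 * dedicated VM-entry/VM-exit EFER controls when the CPU has them, otherwise fall back to an
 * auto-load/store MSR-area slot, and do neither when no swapping is required.
 */
#if 0
typedef enum { VMXSKETCHEFER_VMCS_CTLS, VMXSKETCHEFER_MSR_AREA, VMXSKETCHEFER_NONE } VMXSKETCHEFER;
static VMXSKETCHEFER vmxSketchEferSwapMethod(bool fShouldSwapEfer, bool fSupportsVmcsEfer)
{
    if (fShouldSwapEfer)
        return fSupportsVmcsEfer ? VMXSKETCHEFER_VMCS_CTLS : VMXSKETCHEFER_MSR_AREA;
    return VMXSKETCHEFER_NONE;
}
#endif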
5988
5989/**
5990 * Selects the appropriate function to run guest code.
5991 *
5992 * @returns VBox status code.
5993 * @param pVCpu The cross context virtual CPU structure.
5994 * @param pVmxTransient The VMX-transient structure.
5995 *
5996 * @remarks No-long-jump zone!!!
5997 */
5998static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5999{
6000 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6001 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6002
6003 if (CPUMIsGuestInLongModeEx(pCtx))
6004 {
6005#ifndef VBOX_ENABLE_64_BITS_GUESTS
6006 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
6007#endif
6008 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
6009#if HC_ARCH_BITS == 32
6010 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
6011 if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64)
6012 {
6013#ifdef VBOX_STRICT
6014 if (pVmcsInfo->pfnStartVM != NULL) /* Very first VM-entry would have saved host-state already, ignore it. */
6015 {
6016                /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
6017 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
6018 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
6019 AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR),
6020 ("fCtxChanged=%#RX64\n", fCtxChanged));
6021 }
6022#endif
6023 pVmcsInfo->pfnStartVM = VMXR0SwitcherStartVM64;
6024
6025 /* Mark that we've switched to 64-bit handler, we can't safely switch back to 32-bit for
6026 the rest of the VM run (until VM reset). See @bugref{8432#c7}. */
6027 pVmcsInfo->fSwitchedTo64on32 = true;
6028 Log4Func(("Selected 64-bit switcher\n"));
6029 }
6030#else
6031 /* 64-bit host. */
6032 pVmcsInfo->pfnStartVM = VMXR0StartVM64;
6033#endif
6034 }
6035 else
6036 {
6037 /* Guest is not in long mode, use the 32-bit handler. */
6038#if HC_ARCH_BITS == 32
6039 if ( pVmcsInfo->pfnStartVM != VMXR0StartVM32
6040 && !pVmcsInfo->fSwitchedTo64on32 /* If set, guest mode change does not imply switcher change. */
6041 && pVmcsInfo->pfnStartVM != NULL) /* Very first VM-entry would have saved host-state already, ignore it. */
6042 {
6043# ifdef VBOX_STRICT
6044            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
6045 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
6046 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
6047 AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR),
6048 ("fCtxChanged=%#RX64\n", fCtxChanged));
6049# endif
6050 }
6051# ifdef VBOX_ENABLE_64_BITS_GUESTS
6052 /*
6053 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel
6054 * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit
6055 * switcher flag now because we know the guest is in a sane state where it's safe
6056 * to use the 32-bit switcher. Otherwise, check the guest state if it's safe to use
6057 * the much faster 32-bit switcher again.
6058 */
6059 if (!pVmcsInfo->fSwitchedTo64on32)
6060 {
6061 if (pVmcsInfo->pfnStartVM != VMXR0StartVM32)
6062 Log4Func(("Selected 32-bit switcher\n"));
6063 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
6064 }
6065 else
6066 {
6067 Assert(pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64);
6068 if ( pVmcsInfo->RealMode.fRealOnV86Active
6069 || hmR0VmxIs32BitSwitcherSafe(pCtx))
6070 {
6071 pVmcsInfo->fSwitchedTo64on32 = false;
6072 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
6073 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR
6074 | HM_CHANGED_VMX_ENTRY_EXIT_CTLS
6075 | HM_CHANGED_HOST_CONTEXT);
6076 Log4Func(("Selected 32-bit switcher (safe)\n"));
6077 }
6078 }
6079# else
6080 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
6081# endif
6082#else
6083 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
6084#endif
6085 }
6086 Assert(pVmcsInfo->pfnStartVM);
6087 return VINF_SUCCESS;
6088}
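
/*
 * Note: illustrative summary of the VM-run handler selection above (derived from the
 * code, not an additional code path), assuming VBOX_ENABLE_64_BITS_GUESTS:
 *
 *   64-bit host, guest in long mode      -> VMXR0StartVM64
 *   64-bit host, guest not in long mode  -> VMXR0StartVM32
 *   32-bit host, guest in long mode      -> VMXR0SwitcherStartVM64 (sticky until VM reset, see @bugref{8432#c7})
 *   32-bit host, guest not in long mode  -> VMXR0StartVM32, unless the 64-on-32 switcher is still
 *                                           active and the guest state is not yet safe for the 32-bit one.
 */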
6089
6090
6091/**
6092 * Wrapper for running the guest code in VT-x.
6093 *
6094 * @returns VBox status code, no informational status codes.
6095 * @param pVCpu The cross context virtual CPU structure.
6096 * @param pVmxTransient The VMX-transient structure.
6097 *
6098 * @remarks No-long-jump zone!!!
6099 */
6100DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
6101{
6102 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
6103 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6104 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
6105
6106 /** @todo Add stats for VMRESUME vs VMLAUNCH. */
6107
6108 /*
6109 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
6110 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
6111 * callee-saved and thus the need for this XMM wrapper.
6112 *
6113 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
6114 */
6115 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6116 bool const fResumeVM = RT_BOOL(pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_LAUNCHED);
6117 PVM pVM = pVCpu->CTX_SUFF(pVM);
6118#ifdef VBOX_WITH_KERNEL_USING_XMM
6119 int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu, pVmcsInfo->pfnStartVM);
6120#else
6121 int rc = pVmcsInfo->pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu);
6122#endif
6123 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
6124 return rc;
6125}
6126
6127
6128/**
6129 * Reports world-switch error and dumps some useful debug info.
6130 *
6131 * @param pVCpu The cross context virtual CPU structure.
6132 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
6133 * @param pVmxTransient The VMX-transient structure (only
6134 * exitReason updated).
6135 */
6136static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient)
6137{
6138 Assert(pVCpu);
6139 Assert(pVmxTransient);
6140 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
6141
6142 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
6143 switch (rcVMRun)
6144 {
6145 case VERR_VMX_INVALID_VMXON_PTR:
6146 AssertFailed();
6147 break;
6148 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
6149 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
6150 {
6151 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
6152 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
6153 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
6154 AssertRC(rc);
6155
6156 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
6157 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
6158 Cannot do it here as we may have been long preempted. */
6159
6160#ifdef VBOX_STRICT
6161 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
6162 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
6163 pVmxTransient->uExitReason));
6164 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
6165 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
6166 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
6167 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
6168 else
6169 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
6170 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
6171 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
6172
6173 /* VMX control bits. */
6174 uint32_t u32Val;
6175 uint64_t u64Val;
6176 RTHCUINTREG uHCReg;
6177 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
6178 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
6179 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
6180 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
6181 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6182 {
6183 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
6184 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
6185 }
6186 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
6187 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
6188 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
6189 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
6190 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
6191 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
6192 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
6193 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
6194 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
6195 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
6196 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
6197 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
6198 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
6199 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
6200 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
6201 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
6202 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
6203 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
6204 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
6205 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
6206 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
6207 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
6208 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
6209 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
6210 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
6211 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
6212 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
6213 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
6214 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
6215 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
6216 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
6217 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
6218 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
6219 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
6220 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
6221 {
6222 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
6223 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
6224 }
6225
6226 /* Guest bits. */
6227 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
6228 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rip, u64Val));
6229 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
6230 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rsp, u64Val));
6231 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
6232 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pVCpu->cpum.GstCtx.eflags.u32, u32Val));
6233 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
6234 {
6235 rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val); AssertRC(rc);
6236 Log4(("VMX_VMCS16_VPID %u\n", u32Val));
6237 }
6238
6239 /* Host bits. */
6240 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
6241 Log4(("Host CR0 %#RHr\n", uHCReg));
6242 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
6243 Log4(("Host CR3 %#RHr\n", uHCReg));
6244 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
6245 Log4(("Host CR4 %#RHr\n", uHCReg));
6246
6247 RTGDTR HostGdtr;
6248 PCX86DESCHC pDesc;
6249 ASMGetGDTR(&HostGdtr);
6250 rc = VMXReadVmcs32(VMX_VMCS16_HOST_CS_SEL, &u32Val); AssertRC(rc);
6251 Log4(("Host CS %#08x\n", u32Val));
6252 if (u32Val < HostGdtr.cbGdt)
6253 {
6254 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6255 hmR0DumpDescriptor(pDesc, u32Val, "CS: ");
6256 }
6257
6258 rc = VMXReadVmcs32(VMX_VMCS16_HOST_DS_SEL, &u32Val); AssertRC(rc);
6259 Log4(("Host DS %#08x\n", u32Val));
6260 if (u32Val < HostGdtr.cbGdt)
6261 {
6262 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6263 hmR0DumpDescriptor(pDesc, u32Val, "DS: ");
6264 }
6265
6266 rc = VMXReadVmcs32(VMX_VMCS16_HOST_ES_SEL, &u32Val); AssertRC(rc);
6267 Log4(("Host ES %#08x\n", u32Val));
6268 if (u32Val < HostGdtr.cbGdt)
6269 {
6270 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6271 hmR0DumpDescriptor(pDesc, u32Val, "ES: ");
6272 }
6273
6274 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FS_SEL, &u32Val); AssertRC(rc);
6275 Log4(("Host FS %#08x\n", u32Val));
6276 if (u32Val < HostGdtr.cbGdt)
6277 {
6278 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6279 hmR0DumpDescriptor(pDesc, u32Val, "FS: ");
6280 }
6281
6282 rc = VMXReadVmcs32(VMX_VMCS16_HOST_GS_SEL, &u32Val); AssertRC(rc);
6283 Log4(("Host GS %#08x\n", u32Val));
6284 if (u32Val < HostGdtr.cbGdt)
6285 {
6286 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6287 hmR0DumpDescriptor(pDesc, u32Val, "GS: ");
6288 }
6289
6290 rc = VMXReadVmcs32(VMX_VMCS16_HOST_SS_SEL, &u32Val); AssertRC(rc);
6291 Log4(("Host SS %#08x\n", u32Val));
6292 if (u32Val < HostGdtr.cbGdt)
6293 {
6294 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6295 hmR0DumpDescriptor(pDesc, u32Val, "SS: ");
6296 }
6297
6298 rc = VMXReadVmcs32(VMX_VMCS16_HOST_TR_SEL, &u32Val); AssertRC(rc);
6299 Log4(("Host TR %#08x\n", u32Val));
6300 if (u32Val < HostGdtr.cbGdt)
6301 {
6302 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6303 hmR0DumpDescriptor(pDesc, u32Val, "TR: ");
6304 }
6305
6306 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
6307 Log4(("Host TR Base %#RHv\n", uHCReg));
6308 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
6309 Log4(("Host GDTR Base %#RHv\n", uHCReg));
6310 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
6311 Log4(("Host IDTR Base %#RHv\n", uHCReg));
6312 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
6313 Log4(("Host SYSENTER CS %#08x\n", u32Val));
6314 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
6315 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
6316 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
6317 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
6318 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
6319 Log4(("Host RSP %#RHv\n", uHCReg));
6320 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
6321 Log4(("Host RIP %#RHv\n", uHCReg));
6322# if HC_ARCH_BITS == 64
6323 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
6324 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
6325 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
6326 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
6327 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
6328 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
6329# endif
6330#endif /* VBOX_STRICT */
6331 break;
6332 }
6333
6334 default:
6335 /* Impossible */
6336 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
6337 break;
6338 }
6339}
6340
6341
6342#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
6343# ifndef VMX_USE_CACHED_VMCS_ACCESSES
6344# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
6345# endif
6346
6347/**
6348 * Initializes the VMCS read cache.
6349 *
6350 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
6351 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
6352 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
6353 * (those that have a 32-bit FULL & HIGH part).
6354 *
6355 * @param pVCpu The cross context virtual CPU structure.
6356 */
6357static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
6358{
6359#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
6360 do { \
6361 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
6362 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
6363 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
6364 ++cReadFields; \
6365 } while (0)
6366
6367 PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache;
6368 uint32_t cReadFields = 0;
6369
6370 /*
6371 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
6372 * and serve to indicate exceptions to the rules.
6373 */
6374
6375 /* Guest-natural selector base fields. */
6376#if 0
6377 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
6378 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
6379 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
6380#endif
6381 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
6382 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
6383 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
6384 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
6385 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
6386 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
6387 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
6388 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
6389 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
6390 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
6391 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
6392 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
6393#if 0
6394 /* Unused natural width guest-state fields. */
6395 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS);
6396 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in nested paging case */
6397#endif
6398 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
6399 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
6400
6401 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for
6402 these 64-bit fields (using "FULL" and "HIGH" fields). */
6403#if 0
6404 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
6405 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
6406 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
6407 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
6408 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
6409 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
6410 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
6411 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
6412 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
6413#endif
6414
6415 /* Natural width guest-state fields. */
6416 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
6417 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_GUEST_LINEAR_ADDR);
6418
6419 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
6420 {
6421 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
6422 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
6423 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
6424 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
6425 }
6426 else
6427 {
6428 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
6429 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
6430 }
6431
6432#undef VMXLOCAL_INIT_READ_CACHE_FIELD
6433}
6434
6435
6436/**
6437 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
6438 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
6439 * Darwin, running 64-bit guests).
6440 *
6441 * @returns VBox status code.
6442 * @param pVCpu The cross context virtual CPU structure.
6443 * @param idxField The VMCS field encoding.
6444 * @param u64Val 16, 32 or 64-bit value.
6445 */
6446VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
6447{
6448 int rc;
6449 switch (idxField)
6450 {
6451 /*
6452         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
6453 */
6454 /* 64-bit Control fields. */
6455 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
6456 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
6457 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
6458 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
6459 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
6460 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
6461 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
6462 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
6463 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
6464 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
6465 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
6466 case VMX_VMCS64_CTRL_EPTP_FULL:
6467 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
6468 /* 64-bit Guest-state fields. */
6469 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
6470 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
6471 case VMX_VMCS64_GUEST_PAT_FULL:
6472 case VMX_VMCS64_GUEST_EFER_FULL:
6473 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
6474 case VMX_VMCS64_GUEST_PDPTE0_FULL:
6475 case VMX_VMCS64_GUEST_PDPTE1_FULL:
6476 case VMX_VMCS64_GUEST_PDPTE2_FULL:
6477 case VMX_VMCS64_GUEST_PDPTE3_FULL:
6478 /* 64-bit Host-state fields. */
6479 case VMX_VMCS64_HOST_PAT_FULL:
6480 case VMX_VMCS64_HOST_EFER_FULL:
6481 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
6482 {
6483 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
6484 rc |= VMXWriteVmcs32(idxField + 1, RT_HI_U32(u64Val));
6485 break;
6486 }
6487
6488 /*
6489 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
6490 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
6491 */
6492 /* Natural-width Guest-state fields. */
6493 case VMX_VMCS_GUEST_CR3:
6494 case VMX_VMCS_GUEST_ES_BASE:
6495 case VMX_VMCS_GUEST_CS_BASE:
6496 case VMX_VMCS_GUEST_SS_BASE:
6497 case VMX_VMCS_GUEST_DS_BASE:
6498 case VMX_VMCS_GUEST_FS_BASE:
6499 case VMX_VMCS_GUEST_GS_BASE:
6500 case VMX_VMCS_GUEST_LDTR_BASE:
6501 case VMX_VMCS_GUEST_TR_BASE:
6502 case VMX_VMCS_GUEST_GDTR_BASE:
6503 case VMX_VMCS_GUEST_IDTR_BASE:
6504 case VMX_VMCS_GUEST_RSP:
6505 case VMX_VMCS_GUEST_RIP:
6506 case VMX_VMCS_GUEST_SYSENTER_ESP:
6507 case VMX_VMCS_GUEST_SYSENTER_EIP:
6508 {
6509 if (!(RT_HI_U32(u64Val)))
6510 {
6511 /* If this field is 64-bit, VT-x will zero out the top bits. */
6512 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
6513 }
6514 else
6515 {
6516 /* Assert that only the 32->64 switcher case should ever come here. */
6517 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
6518 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
6519 }
6520 break;
6521 }
6522
6523 default:
6524 {
6525 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
6526 pVCpu->hm.s.u32HMError = idxField;
6527 rc = VERR_INVALID_PARAMETER;
6528 break;
6529 }
6530 }
6531 AssertRCReturn(rc, rc);
6532 return rc;
6533}
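
/*
 * Illustrative usage sketch of VMXWriteVmcs64Ex above (not compiled; the variable names
 * are hypothetical): a 64-bit control field is written as two 32-bit VMWRITEs (FULL and
 * HIGH), while a natural-width guest field with non-zero high bits is deferred to the
 * VMCS write cache until the 64-bit switcher executes it.
 */
#if 0
    int rc = VMXWriteVmcs64Ex(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);  /* Split into FULL + HIGH writes. */
    rc    |= VMXWriteVmcs64Ex(pVCpu, VMX_VMCS_GUEST_RIP, pCtx->rip);                /* Cached when RIP exceeds 4G. */
    AssertRC(rc);
#endif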
6534
6535
6536/**
6537 * Queues up a VMWRITE by using the VMCS write cache.
6538 * This is only used on 32-bit hosts (except Darwin) for 64-bit guests.
6539 *
6540 * @param pVCpu The cross context virtual CPU structure.
6541 * @param idxField The VMCS field encoding.
6542 * @param u64Val 16, 32 or 64-bit value.
6543 */
6544VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
6545{
6546 AssertPtr(pVCpu);
6547 PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache;
6548
6549 AssertMsgReturn(pCache->Write.cValidEntries < VMX_VMCS_CACHE_MAX_ENTRY - 1,
6550 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
6551
6552 /* Make sure there are no duplicates. */
6553 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
6554 {
6555 if (pCache->Write.aField[i] == idxField)
6556 {
6557 pCache->Write.aFieldVal[i] = u64Val;
6558 return VINF_SUCCESS;
6559 }
6560 }
6561
6562 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
6563 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
6564 pCache->Write.cValidEntries++;
6565 return VINF_SUCCESS;
6566}
6567#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
6568
6569
6570/**
6571 * Sets up the usage of TSC-offsetting and updates the VMCS.
6572 *
6573 * If TSC-offsetting is not possible, this causes VM-exits on RDTSC(P)
6574 * instructions. It also sets up the VMX-preemption timer when the preemption
6575 * timer is in use.
6576 *
6577 * @param pVCpu The cross context virtual CPU structure.
6578 * @param pVmxTransient The VMX-transient structure.
6579 *
6580 * @remarks No-long-jump zone!!!
6581 */
6582static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
6583{
6584 bool fOffsettedTsc;
6585 bool fParavirtTsc;
6586 uint64_t uTscOffset;
6587 PVM pVM = pVCpu->CTX_SUFF(pVM);
6588 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
6589
6590 if (pVM->hm.s.vmx.fUsePreemptTimer)
6591 {
6592 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
6593
6594 /* Make sure the returned values have sane upper and lower boundaries. */
6595 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
6596 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
6597 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
6598 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
6599
6600 /** @todo r=ramshankar: We need to find a way to integrate nested-guest
6601 * preemption timers here. We probably need to clamp the preemption timer,
6602 * after converting the timer value to the host. */
6603 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
6604 int rc = VMXWriteVmcs32(VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
6605 AssertRC(rc);
6606 }
6607 else
6608 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
6609
6610 if (fParavirtTsc)
6611 {
6612        /* Currently neither Hyper-V nor KVM needs to update its paravirtualized TSC
6613           information before every VM-entry, hence this is disabled for performance's sake. */
6614#if 0
6615 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
6616 AssertRC(rc);
6617#endif
6618 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
6619 }
6620
6621 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
6622 if ( fOffsettedTsc
6623 && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
6624 {
6625 if (pVmxTransient->fIsNestedGuest)
6626 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
6627 if (pVmcsInfo->u64TscOffset != uTscOffset)
6628 {
6629 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
6630 AssertRC(rc);
6631 pVmcsInfo->u64TscOffset = uTscOffset;
6632 }
6633
6634 if (uProcCtls & VMX_PROC_CTLS_RDTSC_EXIT)
6635 {
6636 uProcCtls &= ~VMX_PROC_CTLS_RDTSC_EXIT;
6637 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
6638 AssertRC(rc);
6639 pVmcsInfo->u32ProcCtls = uProcCtls;
6640 }
6641 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
6642 }
6643 else
6644 {
6645 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
6646 if (!(uProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
6647 {
6648 uProcCtls |= VMX_PROC_CTLS_RDTSC_EXIT;
6649 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
6650 AssertRC(rc);
6651 pVmcsInfo->u32ProcCtls = uProcCtls;
6652 }
6653 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
6654 }
6655}
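
/*
 * Worked example of the preemption-timer clamping above, using illustrative numbers
 * only: with a 2 GHz TSC and cPreemptTimerShift = 5, the deadline is clamped to
 * [2e9/2048, 2e9/64] = [~976.5K, ~31.25M] TSC ticks, which the right shift turns into
 * roughly [30.5K, 976.5K] preemption-timer ticks.
 */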
6656
6657
6658/**
6659 * Gets the IEM exception flags for the specified vector and IDT vectoring /
6660 * VM-exit interruption info type.
6661 *
6662 * @returns The IEM exception flags.
6663 * @param uVector The event vector.
6664 * @param uVmxEventType The VMX event type.
6665 *
6666 * @remarks This function currently only constructs flags required for
6667 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
6668 * and CR2 aspects of an exception are not included).
6669 */
6670static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
6671{
6672 uint32_t fIemXcptFlags;
6673 switch (uVmxEventType)
6674 {
6675 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6676 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6677 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6678 break;
6679
6680 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6681 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6682 break;
6683
6684 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6685 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
6686 break;
6687
6688 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
6689 {
6690 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6691 if (uVector == X86_XCPT_BP)
6692 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
6693 else if (uVector == X86_XCPT_OF)
6694 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
6695 else
6696 {
6697 fIemXcptFlags = 0;
6698 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
6699 }
6700 break;
6701 }
6702
6703 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6704 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6705 break;
6706
6707 default:
6708 fIemXcptFlags = 0;
6709 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
6710 break;
6711 }
6712 return fIemXcptFlags;
6713}
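
/*
 * Quick reference for the mapping above (derived from the code): HW_XCPT and NMI map to
 * IEM_XCPT_FLAGS_T_CPU_XCPT, EXT_INT maps to IEM_XCPT_FLAGS_T_EXT_INT, and the software
 * variants (PRIV_SW_XCPT, SW_XCPT, SW_INT) map to IEM_XCPT_FLAGS_T_SOFT_INT with the
 * ICEBP/BP/OF instruction flags added as appropriate.
 */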
6714
6715
6716/**
6717 * Sets an event as a pending event to be injected into the guest.
6718 *
6719 * @param pVCpu The cross context virtual CPU structure.
6720 * @param u32IntInfo The VM-entry interruption-information field.
6721 * @param cbInstr The VM-entry instruction length in bytes (for software
6722 * interrupts, exceptions and privileged software
6723 * exceptions).
6724 * @param u32ErrCode The VM-entry exception error code.
6725 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
6726 * page-fault.
6727 */
6728DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
6729 RTGCUINTPTR GCPtrFaultAddress)
6730{
6731 Assert(!pVCpu->hm.s.Event.fPending);
6732 pVCpu->hm.s.Event.fPending = true;
6733 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
6734 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
6735 pVCpu->hm.s.Event.cbInstr = cbInstr;
6736 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
6737}
6738
6739
6740/**
6741 * Sets an external interrupt as pending-for-injection into the VM.
6742 *
6743 * @param pVCpu The cross context virtual CPU structure.
6744 * @param u8Interrupt The external interrupt vector.
6745 */
6746DECLINLINE(void) hmR0VmxSetPendingExtInt(PVMCPU pVCpu, uint8_t u8Interrupt)
6747{
6748 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
6749 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
6750 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6751 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6752 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6753}
6754
6755
6756/**
6757 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
6758 *
6759 * @param pVCpu The cross context virtual CPU structure.
6760 */
6761DECLINLINE(void) hmR0VmxSetPendingXcptNmi(PVMCPU pVCpu)
6762{
6763 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
6764 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
6765 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6766 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6767 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6768}
6769
6770
6771/**
6772 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
6773 *
6774 * @param pVCpu The cross context virtual CPU structure.
6775 */
6776DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
6777{
6778 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
6779 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6780 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
6781 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6782 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6783}
6784
6785
6786/**
6787 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
6788 *
6789 * @param pVCpu The cross context virtual CPU structure.
6790 */
6791DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
6792{
6793 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
6794 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6795 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6796 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6797 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6798}
6799
6800
6801/**
6802 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
6803 *
6804 * @param pVCpu The cross context virtual CPU structure.
6805 */
6806DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
6807{
6808 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
6809 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6810 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6811 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6812 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6813}
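
/*
 * Illustrative sketch (not compiled): a hardware #PF would be queued the same way as the
 * helpers above, additionally supplying the error code and the faulting address (CR2) to
 * hmR0VmxSetPendingEvent. uErrCode and GCPtrFaultAddress are hypothetical locals here.
 */
#if 0
    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_PF)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                              | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, uErrCode, GCPtrFaultAddress);
#endif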
6814
6815
6816#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6817/**
6818 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
6819 *
6820 * @param pVCpu The cross context virtual CPU structure.
6821 * @param u32ErrCode The error code for the general-protection exception.
6822 */
6823DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
6824{
6825 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
6826 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6827 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
6828 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6829 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
6830}
6831
6832
6833/**
6834 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
6835 *
6836 * @param pVCpu The cross context virtual CPU structure.
6837 * @param u32ErrCode The error code for the stack exception.
6838 */
6839DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPU pVCpu, uint32_t u32ErrCode)
6840{
6841 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
6842 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6843 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
6844 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6845 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
6846}
6847
6848
6849/**
6850 * Decodes the memory operand of an instruction that caused a VM-exit.
6851 *
6852 * The VM-exit qualification field provides the displacement field for memory
6853 * operand instructions, if any.
6854 *
6855 * @returns Strict VBox status code (i.e. informational status codes too).
6856 * @retval VINF_SUCCESS if the operand was successfully decoded.
6857 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6858 * operand.
6859 * @param pVCpu The cross context virtual CPU structure.
6860 * @param uExitInstrInfo The VM-exit instruction information field.
6861 * @param enmMemAccess The memory operand's access type (read or write).
6862 * @param GCPtrDisp The instruction displacement field, if any. For
6863 * RIP-relative addressing pass RIP + displacement here.
6864 * @param pGCPtrMem Where to store the effective destination memory address.
6865 *
6866 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
6867 * virtual-8086 mode hence skips those checks while verifying if the
6868 * segment is valid.
6869 */
6870static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6871 PRTGCPTR pGCPtrMem)
6872{
6873 Assert(pGCPtrMem);
6874 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6875 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6876 | CPUMCTX_EXTRN_CR0);
6877
6878 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6879 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6880 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6881
6882 VMXEXITINSTRINFO ExitInstrInfo;
6883 ExitInstrInfo.u = uExitInstrInfo;
6884 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6885 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6886 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6887 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6888 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6889 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6890 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6891 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6892 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6893
6894 /*
6895 * Validate instruction information.
6896     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6897 */
6898 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6899 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6900 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6901 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6902 AssertLogRelMsgReturn(fIsMemOperand,
6903 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6904
6905 /*
6906 * Compute the complete effective address.
6907 *
6908 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6909 * See AMD spec. 4.5.2 "Segment Registers".
6910 */
6911 RTGCPTR GCPtrMem = GCPtrDisp;
6912 if (fBaseRegValid)
6913 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6914 if (fIdxRegValid)
6915 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6916
6917 RTGCPTR const GCPtrOff = GCPtrMem;
6918 if ( !fIsLongMode
6919 || iSegReg >= X86_SREG_FS)
6920 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6921 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
6922
6923 /*
6924 * Validate effective address.
6925 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6926 */
6927 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6928 Assert(cbAccess > 0);
6929 if (fIsLongMode)
6930 {
6931 if (X86_IS_CANONICAL(GCPtrMem))
6932 {
6933 *pGCPtrMem = GCPtrMem;
6934 return VINF_SUCCESS;
6935 }
6936
6937 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6938 * "Data Limit Checks in 64-bit Mode". */
6939 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6940 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6941 return VINF_HM_PENDING_XCPT;
6942 }
6943
6944 /*
6945 * This is a watered down version of iemMemApplySegment().
6946 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6947 * and segment CPL/DPL checks are skipped.
6948 */
6949 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6950 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6951 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6952
6953 /* Check if the segment is present and usable. */
6954 if ( pSel->Attr.n.u1Present
6955 && !pSel->Attr.n.u1Unusable)
6956 {
6957 Assert(pSel->Attr.n.u1DescType);
6958 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6959 {
6960 /* Check permissions for the data segment. */
6961 if ( enmMemAccess == VMXMEMACCESS_WRITE
6962 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6963 {
6964 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6965 hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
6966 return VINF_HM_PENDING_XCPT;
6967 }
6968
6969 /* Check limits if it's a normal data segment. */
6970 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6971 {
6972 if ( GCPtrFirst32 > pSel->u32Limit
6973 || GCPtrLast32 > pSel->u32Limit)
6974 {
6975 Log4Func(("Data segment limit exceeded. "
6976 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6977 GCPtrLast32, pSel->u32Limit));
6978 if (iSegReg == X86_SREG_SS)
6979 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6980 else
6981 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6982 return VINF_HM_PENDING_XCPT;
6983 }
6984 }
6985 else
6986 {
6987 /* Check limits if it's an expand-down data segment.
6988 Note! The upper boundary is defined by the B bit, not the G bit! */
6989 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6990 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6991 {
6992 Log4Func(("Expand-down data segment limit exceeded. "
6993 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6994 GCPtrLast32, pSel->u32Limit));
6995 if (iSegReg == X86_SREG_SS)
6996 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6997 else
6998 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6999 return VINF_HM_PENDING_XCPT;
7000 }
7001 }
7002 }
7003 else
7004 {
7005 /* Check permissions for the code segment. */
7006 if ( enmMemAccess == VMXMEMACCESS_WRITE
7007 || ( enmMemAccess == VMXMEMACCESS_READ
7008 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
7009 {
7010 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
7011 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
7012 hmR0VmxSetPendingXcptGP(pVCpu, 0);
7013 return VINF_HM_PENDING_XCPT;
7014 }
7015
7016 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
7017 if ( GCPtrFirst32 > pSel->u32Limit
7018 || GCPtrLast32 > pSel->u32Limit)
7019 {
7020 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
7021 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
7022 if (iSegReg == X86_SREG_SS)
7023 hmR0VmxSetPendingXcptSS(pVCpu, 0);
7024 else
7025 hmR0VmxSetPendingXcptGP(pVCpu, 0);
7026 return VINF_HM_PENDING_XCPT;
7027 }
7028 }
7029 }
7030 else
7031 {
7032 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
7033 hmR0VmxSetPendingXcptGP(pVCpu, 0);
7034 return VINF_HM_PENDING_XCPT;
7035 }
7036
7037 *pGCPtrMem = GCPtrMem;
7038 return VINF_SUCCESS;
7039}
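
/*
 * Worked example of the effective-address computation above (illustrative values only):
 * with a 32-bit address size, base register = 0x1000, index register = 0x20, scale = 2,
 * displacement = 0x8 and DS.base = 0x10000, the decoder yields
 * 0x10000 + 0x1000 + (0x20 << 2) + 0x8 = 0x11088, masked with 0xffffffff. In long mode
 * the DS base would be skipped (only FS and GS bases are applied).
 */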
7040
7041
7042/**
7043 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
7044 * guest attempting to execute a VMX instruction.
7045 *
7046 * @returns Strict VBox status code (i.e. informational status codes too).
7047 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
7048 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
7049 *
7050 * @param pVCpu The cross context virtual CPU structure.
7051 * @param uExitReason The VM-exit reason.
7052 *
7053 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
7054 * @remarks No-long-jump zone!!!
7055 */
7056static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, uint32_t uExitReason)
7057{
7058 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
7059 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
7060
7061 /*
7062 * The physical CPU would have already checked the CPU mode/code segment.
7063 * We shall just assert here for paranoia.
7064 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7065 */
7066 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
7067 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
7068 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
7069
7070 if (uExitReason == VMX_EXIT_VMXON)
7071 {
7072 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7073
7074 /*
7075 * We check CR4.VMXE because it is required to be always set while in VMX operation
7076 * by physical CPUs and our CR4 read shadow is only consulted when executing specific
7077 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
7078 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
7079 */
7080 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
7081 {
7082 Log4Func(("CR4.VMXE is not set -> #UD\n"));
7083 hmR0VmxSetPendingXcptUD(pVCpu);
7084 return VINF_HM_PENDING_XCPT;
7085 }
7086 }
7087 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
7088 {
7089 /*
7090 * The guest has not entered VMX operation but attempted to execute a VMX instruction
7091         * (other than VMXON), so we need to raise a #UD.
7092 */
7093 Log4Func(("Not in VMX root mode -> #UD\n"));
7094 hmR0VmxSetPendingXcptUD(pVCpu);
7095 return VINF_HM_PENDING_XCPT;
7096 }
7097
7098 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
7099 return VINF_SUCCESS;
7100}
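
/*
 * In short, the checks above boil down to the following (derived from the code, not an
 * extra code path): for VMXON the guest must have CR4.VMXE set, otherwise #UD; for all
 * other VMX instructions the guest must already be in VMX root mode, otherwise #UD; all
 * remaining checks and VM-exit intercepts are left to IEM instruction emulation.
 */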
7101#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
7102
7103
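/**
 * Fixes up the attributes of a segment register that VT-x has marked unusable, so that
 * the rest of VBox sees a consistent, not-present segment (see the detailed explanation
 * inside the function).
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pSelReg     The segment register that needs fixing.
 * @param   idxSel      The VMCS field encoding of the segment selector, for logging.
 */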
7104static void hmR0VmxFixUnusableSegRegAttr(PVMCPU pVCpu, PCPUMSELREG pSelReg, uint32_t idxSel)
7105{
7106 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
7107
7108 /*
7109 * If VT-x marks the segment as unusable, most other bits remain undefined:
7110 * - For CS the L, D and G bits have meaning.
7111 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
7112 * - For the remaining data segments no bits are defined.
7113 *
7114     * The present bit and the unusable bit have been observed to be set at the
7115 * same time (the selector was supposed to be invalid as we started executing
7116 * a V8086 interrupt in ring-0).
7117 *
7118 * What should be important for the rest of the VBox code, is that the P bit is
7119 * cleared. Some of the other VBox code recognizes the unusable bit, but
7120     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
7121 * safe side here, we'll strip off P and other bits we don't care about. If
7122 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
7123 *
7124 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
7125 */
7126#ifdef VBOX_STRICT
7127 uint32_t const uAttr = pSelReg->Attr.u;
7128#endif
7129
7130 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
7131 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
7132 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
7133
7134#ifdef VBOX_STRICT
7135 VMMRZCallRing3Disable(pVCpu);
7136 Log4Func(("Unusable %#x: sel=%#x attr=%#x -> %#x\n", idxSel, pSelReg->Sel, uAttr, pSelReg->Attr.u));
7137# ifdef DEBUG_bird
7138 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
7139 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
7140 idxSel, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
7141# endif
7142 VMMRZCallRing3Enable(pVCpu);
7143 NOREF(uAttr);
7144#endif
7145 RT_NOREF2(pVCpu, idxSel);
7146}
7147
7148
7149/**
7150 * Imports a guest segment register from the current VMCS into the guest-CPU
7151 * context.
7152 *
7153 * @returns VBox status code.
7154 * @param pVCpu The cross context virtual CPU structure.
7155 * @param iSegReg The segment register number (X86_SREG_XXX).
7156 *
7157 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7158 * do not log!
7159 */
7160static int hmR0VmxImportGuestSegReg(PVMCPU pVCpu, uint8_t iSegReg)
7161{
7162 Assert(iSegReg < X86_SREG_COUNT);
7163
7164 uint32_t const idxSel = g_aVmcsSegSel[iSegReg];
7165 uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg];
7166 uint32_t const idxAttr = g_aVmcsSegAttr[iSegReg];
7167#ifdef VMX_USE_CACHED_VMCS_ACCESSES
7168 uint32_t const idxBase = g_aVmcsCacheSegBase[iSegReg];
7169#else
7170 uint32_t const idxBase = g_aVmcsSegBase[iSegReg];
7171#endif
7172 uint64_t u64Base;
7173 uint32_t u32Sel, u32Limit, u32Attr;
7174 int rc = VMXReadVmcs32(idxSel, &u32Sel);
7175 rc |= VMXReadVmcs32(idxLimit, &u32Limit);
7176 rc |= VMXReadVmcs32(idxAttr, &u32Attr);
7177 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
7178 if (RT_SUCCESS(rc))
7179 {
7180 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
7181 pSelReg->Sel = u32Sel;
7182 pSelReg->ValidSel = u32Sel;
7183 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
7184 pSelReg->u32Limit = u32Limit;
7185 pSelReg->u64Base = u64Base;
7186 pSelReg->Attr.u = u32Attr;
7187 if (u32Attr & X86DESCATTR_UNUSABLE)
7188 hmR0VmxFixUnusableSegRegAttr(pVCpu, pSelReg, idxSel);
7189 }
7190 return rc;
7191}
7192
7193
7194/**
7195 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
7196 *
7197 * @returns VBox status code.
7198 * @param pVCpu The cross context virtual CPU structure.
7199 *
7200 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7201 * do not log!
7202 */
7203static int hmR0VmxImportGuestLdtr(PVMCPU pVCpu)
7204{
7205 uint64_t u64Base;
7206 uint32_t u32Sel, u32Limit, u32Attr;
7207 int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, &u32Sel);
7208 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit);
7209 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr);
7210 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, &u64Base);
7211 if (RT_SUCCESS(rc))
7212 {
7213 pVCpu->cpum.GstCtx.ldtr.Sel = u32Sel;
7214 pVCpu->cpum.GstCtx.ldtr.ValidSel = u32Sel;
7215 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
7216 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
7217 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
7218 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
7219 if (u32Attr & X86DESCATTR_UNUSABLE)
7220 hmR0VmxFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, VMX_VMCS16_GUEST_LDTR_SEL);
7221 }
7222 return rc;
7223}
7224
7225
7226/**
7227 * Imports the guest TR from the current VMCS into the guest-CPU context.
7228 *
7229 * @returns VBox status code.
7230 * @param pVCpu The cross context virtual CPU structure.
7231 *
7232 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7233 * do not log!
7234 */
7235static int hmR0VmxImportGuestTr(PVMCPU pVCpu)
7236{
7237 uint32_t u32Sel, u32Limit, u32Attr;
7238 uint64_t u64Base;
7239 int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_TR_SEL, &u32Sel);
7240 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit);
7241 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr);
7242 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_TR_BASE, &u64Base);
7243 AssertRCReturn(rc, rc);
7244
7245 pVCpu->cpum.GstCtx.tr.Sel = u32Sel;
7246 pVCpu->cpum.GstCtx.tr.ValidSel = u32Sel;
7247 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
7248 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
7249 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
7250 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
7251 /* TR is the only selector that can never be unusable. */
7252 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
7253 return VINF_SUCCESS;
7254}
7255
7256
7257/**
7258 * Imports the guest RIP from the VMCS back into the guest-CPU context.
7259 *
7260 * @returns VBox status code.
7261 * @param pVCpu The cross context virtual CPU structure.
7262 *
7263 * @remarks Called with interrupts and/or preemption disabled, should not assert!
7264 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7265 * instead!!!
7266 */
7267static int hmR0VmxImportGuestRip(PVMCPU pVCpu)
7268{
7269 uint64_t u64Val;
7270 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7271 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
7272 {
7273 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
7274 if (RT_SUCCESS(rc))
7275 {
7276 pCtx->rip = u64Val;
7277 EMR0HistoryUpdatePC(pVCpu, pCtx->rip, false);
7278 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
7279 }
7280 return rc;
7281 }
7282 return VINF_SUCCESS;
7283}
7284
7285
7286/**
7287 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
7288 *
7289 * @returns VBox status code.
7290 * @param pVCpu The cross context virtual CPU structure.
7291 * @param pVmcsInfo The VMCS info. object.
7292 *
7293 * @remarks Called with interrupts and/or preemption disabled, should not assert!
7294 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7295 * instead!!!
7296 */
7297static int hmR0VmxImportGuestRFlags(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
7298{
7299 uint32_t u32Val;
7300 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7301 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
7302 {
7303 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);
7304 if (RT_SUCCESS(rc))
7305 {
7306 pCtx->eflags.u32 = u32Val;
7307
7308 /* Restore eflags for real-on-v86-mode hack. */
7309 if (pVmcsInfo->RealMode.fRealOnV86Active)
7310 {
7311 pCtx->eflags.Bits.u1VM = 0;
7312 pCtx->eflags.Bits.u2IOPL = pVmcsInfo->RealMode.Eflags.Bits.u2IOPL;
7313 }
7314 }
7315 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
7316 return rc;
7317 }
7318 return VINF_SUCCESS;
7319}
7320
7321
7322/**
7323 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
7324 * context.
7325 *
7326 * @returns VBox status code.
7327 * @param pVCpu The cross context virtual CPU structure.
7328 * @param pVmcsInfo The VMCS info. object.
7329 *
7330 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7331 * do not log!
7332 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7333 * instead!!!
7334 */
7335static int hmR0VmxImportGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
7336{
7337 uint32_t u32Val;
7338 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val);
7339 if (RT_SUCCESS(rc))
7340 {
7341 if (!u32Val)
7342 {
7343 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7344 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7345
7346 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
7347 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
7348 }
7349 else
7350 {
7351 /*
7352 * We must import RIP here to set our EM interrupt-inhibited state.
7353 * We also import RFLAGS as our code that evaluates pending interrupts
7354 * before VM-entry requires it.
7355 */
7356 rc = hmR0VmxImportGuestRip(pVCpu);
7357 rc |= hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
7358 if (RT_SUCCESS(rc))
7359 {
7360 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
7361 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
7362 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7363 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7364
7365 if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
7366 {
7367 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
7368 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
7369 }
7370 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
7371 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
7372 }
7373 }
7374 }
7375 return rc;
7376}
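
/*
 * Summary of the interruptibility-state handling above (derived from the code): STI and
 * MOV-SS blocking result in EMSetInhibitInterruptsPC() with the freshly imported RIP,
 * NMI blocking sets VMCPU_FF_BLOCK_NMIS, and a zero interruptibility-state clears both
 * force-flags.
 */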
7377
7378
7379/**
7380 * Worker for VMXR0ImportStateOnDemand.
7381 *
7382 * @returns VBox status code.
7383 * @param pVCpu The cross context virtual CPU structure.
7384 * @param pVmcsInfo The VMCS info. object.
7385 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
7386 */
7387static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
7388{
7389#define VMXLOCAL_BREAK_RC(a_rc) \
7390 if (RT_SUCCESS(a_rc)) \
7391 { } \
7392 else \
7393 break
7394
7395 int rc = VINF_SUCCESS;
7396 PVM pVM = pVCpu->CTX_SUFF(pVM);
7397 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7398 uint64_t u64Val;
7399 uint32_t u32Val;
7400
7401 /*
7402     * Note! This is a hack to work around a mysterious BSOD observed with release builds
7403 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
7404 * neither are other host platforms.
7405 *
7406     * Committing this temporarily as it prevents the BSOD.
7407 *
7408 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
7409 */
7410#ifdef RT_OS_WINDOWS
7411 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
7412 return VERR_HM_IPE_1;
7413#endif
7414
7415 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
7416
7417 /*
7418     * We disable interrupts so that updating the state, and in particular the
7419     * fExtrn modification, is atomic with respect to preemption hooks.
7420 */
7421 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
7422
7423 fWhat &= pCtx->fExtrn;
7424 if (fWhat)
7425 {
7426 do
7427 {
7428 if (fWhat & CPUMCTX_EXTRN_RIP)
7429 {
7430 rc = hmR0VmxImportGuestRip(pVCpu);
7431 VMXLOCAL_BREAK_RC(rc);
7432 }
7433
7434 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
7435 {
7436 rc = hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
7437 VMXLOCAL_BREAK_RC(rc);
7438 }
7439
7440 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
7441 {
7442 rc = hmR0VmxImportGuestIntrState(pVCpu, pVmcsInfo);
7443 VMXLOCAL_BREAK_RC(rc);
7444 }
7445
7446 if (fWhat & CPUMCTX_EXTRN_RSP)
7447 {
7448 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
7449 VMXLOCAL_BREAK_RC(rc);
7450 pCtx->rsp = u64Val;
7451 }
7452
7453 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
7454 {
7455 bool const fRealOnV86Active = pVmcsInfo->RealMode.fRealOnV86Active;
7456 if (fWhat & CPUMCTX_EXTRN_CS)
7457 {
7458 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_CS);
7459 rc |= hmR0VmxImportGuestRip(pVCpu);
7460 if (fRealOnV86Active)
7461 pCtx->cs.Attr.u = pVmcsInfo->RealMode.AttrCS.u;
7462 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
7463 }
7464 if (fWhat & CPUMCTX_EXTRN_SS)
7465 {
7466 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_SS);
7467 if (fRealOnV86Active)
7468 pCtx->ss.Attr.u = pVmcsInfo->RealMode.AttrSS.u;
7469 }
7470 if (fWhat & CPUMCTX_EXTRN_DS)
7471 {
7472 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_DS);
7473 if (fRealOnV86Active)
7474 pCtx->ds.Attr.u = pVmcsInfo->RealMode.AttrDS.u;
7475 }
7476 if (fWhat & CPUMCTX_EXTRN_ES)
7477 {
7478 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_ES);
7479 if (fRealOnV86Active)
7480 pCtx->es.Attr.u = pVmcsInfo->RealMode.AttrES.u;
7481 }
7482 if (fWhat & CPUMCTX_EXTRN_FS)
7483 {
7484 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_FS);
7485 if (fRealOnV86Active)
7486 pCtx->fs.Attr.u = pVmcsInfo->RealMode.AttrFS.u;
7487 }
7488 if (fWhat & CPUMCTX_EXTRN_GS)
7489 {
7490 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_GS);
7491 if (fRealOnV86Active)
7492 pCtx->gs.Attr.u = pVmcsInfo->RealMode.AttrGS.u;
7493 }
7494 VMXLOCAL_BREAK_RC(rc);
7495 }
7496
7497 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
7498 {
7499 if (fWhat & CPUMCTX_EXTRN_LDTR)
7500 rc |= hmR0VmxImportGuestLdtr(pVCpu);
7501
7502 if (fWhat & CPUMCTX_EXTRN_GDTR)
7503 {
7504 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
7505 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
7506 pCtx->gdtr.pGdt = u64Val;
7507 pCtx->gdtr.cbGdt = u32Val;
7508 }
7509
7510 /* Guest IDTR. */
7511 if (fWhat & CPUMCTX_EXTRN_IDTR)
7512 {
7513 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
7514 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
7515 pCtx->idtr.pIdt = u64Val;
7516 pCtx->idtr.cbIdt = u32Val;
7517 }
7518
7519 /* Guest TR. */
7520 if (fWhat & CPUMCTX_EXTRN_TR)
7521 {
7522 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR,
7523                    so we don't need to import it. */
7524 if (!pVmcsInfo->RealMode.fRealOnV86Active)
7525 rc |= hmR0VmxImportGuestTr(pVCpu);
7526 }
7527 VMXLOCAL_BREAK_RC(rc);
7528 }
7529
7530 if (fWhat & CPUMCTX_EXTRN_DR7)
7531 {
7532 if (!pVCpu->hm.s.fUsingHyperDR7)
7533 {
7534 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
7535 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
7536 VMXLOCAL_BREAK_RC(rc);
7537 pCtx->dr[7] = u32Val;
7538 }
7539 }
7540
7541 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
7542 {
7543 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
7544 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
7545 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);
7546 pCtx->SysEnter.cs = u32Val;
7547 VMXLOCAL_BREAK_RC(rc);
7548 }
7549
7550#if HC_ARCH_BITS == 64
7551 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
7552 {
7553 if ( pVM->hm.s.fAllow64BitGuests
7554 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
7555 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
7556 }
7557
7558 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
7559 {
7560 if ( pVM->hm.s.fAllow64BitGuests
7561 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
7562 {
7563 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
7564 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
7565 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
7566 }
7567 }
7568#endif
7569
7570 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
7571#if HC_ARCH_BITS == 32
7572 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
7573#endif
7574 )
7575 {
7576 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
7577 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
7578 Assert(pMsrs);
7579 Assert(cMsrs <= VMX_MISC_MAX_MSRS(pVM->hm.s.vmx.Msrs.u64Misc));
7580 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
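             /* Walk the MSR auto-store area the CPU filled in on VM-exit and copy the values back into the guest context. */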
7581 for (uint32_t i = 0; i < cMsrs; i++)
7582 {
7583 uint32_t const idMsr = pMsrs[i].u32Msr;
7584 switch (idMsr)
7585 {
7586 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
7587 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
7588 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
7589#if HC_ARCH_BITS == 32
7590 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsrs[i].u64Value; break;
7591 case MSR_K6_STAR: pCtx->msrSTAR = pMsrs[i].u64Value; break;
7592 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsrs[i].u64Value; break;
7593 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsrs[i].u64Value; break;
7594#endif
7595 default:
7596 {
7597 pCtx->fExtrn = 0;
7598                             pVCpu->hm.s.u32HMError = idMsr;
7599 ASMSetFlags(fEFlags);
7600 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
7601 return VERR_HM_UNEXPECTED_LD_ST_MSR;
7602 }
7603 }
7604 }
7605 }
7606
7607 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
7608 {
7609 uint64_t u64Shadow;
7610 if (fWhat & CPUMCTX_EXTRN_CR0)
7611 {
7612 /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons,
7613                  * remove when we drop support for 32-bit hosts running 64-bit guests, see
7614 * @bugref{9180#c39}. */
7615 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
7616#if HC_ARCH_BITS == 32
7617 uint32_t u32Shadow;
7618 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
7619 u64Shadow = u32Shadow;
7620#else
7621 rc |= VMXReadVmcs64(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow);
7622#endif
7623 VMXLOCAL_BREAK_RC(rc);
7624 u64Val = u32Val;
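                 /* Bits set in the CR0 guest/host mask are owned by the host; the values the guest sees for
                    those bits live in the CR0 read shadow, the rest come from the VMCS guest CR0 field. */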
7625#if 1
7626 u64Val = (u64Val & ~pVmcsInfo->u64Cr0Mask)
7627 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
7628#else
7629 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
7630 {
7631 u64Val = (u64Val & ~pVmcsInfo->u64Cr0Mask)
7632 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
7633 }
7634 else
7635 {
7636 /** @todo NSTVMX: We need to do some unfudging here because we altered the
7637 * guest/host mask before running the nested-guest. */
7638 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7639 Assert(pVmcsNstGst);
7640
7641 uint64_t const uGstCr0Mask = pVmcsNstGst->u64Cr0Mask.u;
7642 uint64_t const uHstCr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
7643 }
7644#endif
7645 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
7646 CPUMSetGuestCR0(pVCpu, u64Val);
7647 VMMRZCallRing3Enable(pVCpu);
7648 }
7649
7650 if (fWhat & CPUMCTX_EXTRN_CR4)
7651 {
7652 /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons,
7653                  * remove when we drop support for 32-bit hosts running 64-bit guests, see
7654 * @bugref{9180#c39}. */
7655 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
7656#if HC_ARCH_BITS == 32
7657 uint32_t u32Shadow;
7658 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
7659 u64Shadow = u32Shadow;
7660#else
7661 rc |= VMXReadVmcs64(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow);
7662#endif
7663 VMXLOCAL_BREAK_RC(rc);
7664 u64Val = u32Val;
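                 /* Same merge as for CR0: host-owned bits (per the CR4 guest/host mask) come from the read shadow. */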
7665 u64Val = (u64Val & ~pVmcsInfo->u64Cr4Mask)
7666 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
7667 pCtx->cr4 = u64Val;
7668 }
7669
7670 if (fWhat & CPUMCTX_EXTRN_CR3)
7671 {
7672 /* CR0.PG bit changes are always intercepted, so it's up to date. */
7673 if ( pVM->hm.s.vmx.fUnrestrictedGuest
7674 || ( pVM->hm.s.fNestedPaging
7675 && CPUMIsGuestPagingEnabledEx(pCtx)))
7676 {
7677 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
7678 VMXLOCAL_BREAK_RC(rc);
7679 if (pCtx->cr3 != u64Val)
7680 {
7681 pCtx->cr3 = u64Val;
7682 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
7683 }
7684
7685                     /* If the guest is in PAE mode, sync back the PDPEs into the guest state.
7686 Note: CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date. */
7687 if (CPUMIsGuestInPAEModeEx(pCtx))
7688 {
7689 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
7690 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
7691 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
7692 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
7693 VMXLOCAL_BREAK_RC(rc);
7694 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
7695 }
7696 }
7697 }
7698
7699#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7700# if 0
7701     /** @todo NSTVMX: We handle most of these fields individually by passing them to IEM
7702 * VM-exit handlers as parameters. We would handle it differently when using
7703 * the fast path. */
7704 /*
7705 * The hardware virtualization state currently consists of VMCS fields that may be
7706 * modified by execution of the nested-guest (that are not part of the general
7707 * guest state) and is visible to guest software. Hence, it is technically part of
7708 * the guest-CPU state when executing a nested-guest.
7709 */
7710 if ( (fWhat & CPUMCTX_EXTRN_HWVIRT)
7711 && CPUMIsGuestInVmxNonRootMode(pCtx))
7712 {
7713 PVMXVVMCS pGstVmcs = pCtx->hwvirt.vmx.CTX_SUFF(pVmcs);
7714 rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pGstVmcs->u32RoExitReason);
7715 rc |= VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pGstVmcs->u64RoExitQual.u);
7716 VMXLOCAL_BREAK_RC(rc);
7717
7718 /*
7719 * VM-entry can fail due to invalid-guest state, machine-check events and
7720 * MSR loading failures. Other than VM-exit reason and VM-exit qualification
7721 * all other VMCS fields are left unmodified on VM-entry failure.
7722 *
7723 * See Intel spec. 26.7 "VM-entry Failures During Or After Loading Guest State".
7724 */
7725 bool const fEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(pGstVmcs->u32RoExitReason);
7726 if (!fEntryFailed)
7727 {
7728 /*
7729 * Some notes on VMCS fields that may need importing when the fast path
7730 * is implemented. Currently we fully emulate VMLAUNCH/VMRESUME in IEM.
7731 *
7732 * Requires fixing up when using hardware-assisted VMX:
7733 * - VM-exit interruption info: Shouldn't reflect host interrupts/NMIs.
7734 * - VM-exit interruption error code: Cleared to 0 when not appropriate.
7735 * - IDT-vectoring info: Think about this.
7736 * - IDT-vectoring error code: Think about this.
7737 *
7738 * Emulated:
7739 * - Guest-interruptiblity state: Derived from FFs and RIP.
7740 * - Guest pending debug exceptions: Derived from DR6.
7741 * - Guest activity state: Emulated from EM state.
7742 * - Guest PDPTEs: Currently all 0s since we don't support nested EPT.
7743 * - Entry-interrupt info: Emulated, cleared to 0.
7744 */
7745 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pGstVmcs->u32RoExitIntInfo);
7746 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pGstVmcs->u32RoExitIntErrCode);
7747 rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pGstVmcs->u32RoIdtVectoringInfo);
7748 rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pGstVmcs->u32RoIdtVectoringErrCode);
7749 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pGstVmcs->u32RoExitInstrLen);
7750                 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO,             &pGstVmcs->u32RoExitInstrInfo);
7751 rc |= VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pGstVmcs->u64RoGuestPhysAddr.u);
7752 rc |= VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pGstVmcs->u64RoGuestLinearAddr.u);
7753 /** @todo NSTVMX: Save and adjust preemption timer value. */
7754 }
7755
7756 VMXLOCAL_BREAK_RC(rc);
7757 }
7758# endif
7759#endif
7760 }
7761 } while (0);
7762
7763 if (RT_SUCCESS(rc))
7764 {
7765 /* Update fExtrn. */
7766 pCtx->fExtrn &= ~fWhat;
7767
7768 /* If everything has been imported, clear the HM keeper bit. */
7769 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
7770 {
7771 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
7772 Assert(!pCtx->fExtrn);
7773 }
7774 }
7775 }
7776 else
7777 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
7778
7779 ASMSetFlags(fEFlags);
7780
7781     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
7782
7783 if (RT_SUCCESS(rc))
7784 { /* likely */ }
7785 else
7786 return rc;
7787
7788 /*
7789 * Honor any pending CR3 updates.
7790 *
7791 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
7792 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
7793 * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
7794 *
7795  * The reason for such complicated handling is that VM-exits that call into PGM expect CR3 to be up-to-date and thus
7796 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
7797 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
7798 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
7799 *
7800 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
7801 */
7802 if (VMMRZCallRing3IsEnabled(pVCpu))
7803 {
7804 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
7805 {
7806 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
7807 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
7808 }
7809
7810 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
7811 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
7812
7813 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
7814 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
7815 }
7816
7817 return VINF_SUCCESS;
7818#undef VMXLOCAL_BREAK_RC
7819}
7820
7821
7822/**
7823 * Saves the guest state from the VMCS into the guest-CPU context.
7824 *
7825 * @returns VBox status code.
7826 * @param pVCpu The cross context virtual CPU structure.
7827 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
7828 */
7829VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
7830{
7831 PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
7832 return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);
7833}
7834
7835
7836/**
7837 * Checks per-VM and per-VCPU force flag actions that require us to go back to
7838 * ring-3 for one reason or another.
7839 *
7840 * @returns Strict VBox status code (i.e. informational status codes too)
7841 * @retval VINF_SUCCESS if we don't have any actions that require going back to
7842 * ring-3.
7843 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
7844 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
7845 * interrupts)
7846 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
7847 * all EMTs to be in ring-3.
7848 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
7849 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
7850 * to the EM loop.
7851 *
7852 * @param pVCpu The cross context virtual CPU structure.
7853 * @param fStepping Whether we are single-stepping the guest using the
7854 * hypervisor debugger.
7855 */
7856static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, bool fStepping)
7857{
7858 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7859
7860 /*
7861 * Update pending interrupts into the APIC's IRR.
7862 */
7863 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
7864 APICUpdatePendingInterrupts(pVCpu);
7865
7866 /*
7867 * Anything pending? Should be more likely than not if we're doing a good job.
7868 */
7869 PVM pVM = pVCpu->CTX_SUFF(pVM);
7870 if ( !fStepping
7871 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
7872 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
7873 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
7874 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
7875 return VINF_SUCCESS;
7876
7877     /* Pending PGM CR3 sync. */
7878     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
7879 {
7880 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7881 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
7882 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
7883 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
7884 if (rcStrict2 != VINF_SUCCESS)
7885 {
7886 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
7887 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
7888 return rcStrict2;
7889 }
7890 }
7891
7892 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
7893 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
7894 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
7895 {
7896 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
7897 int rc2 = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
7898 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
7899 return rc2;
7900 }
7901
7902 /* Pending VM request packets, such as hardware interrupts. */
7903 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
7904 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
7905 {
7906 Log4Func(("Pending VM request forcing us back to ring-3\n"));
7907 return VINF_EM_PENDING_REQUEST;
7908 }
7909
7910 /* Pending PGM pool flushes. */
7911 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
7912 {
7913 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
7914 return VINF_PGM_POOL_FLUSH_PENDING;
7915 }
7916
7917 /* Pending DMA requests. */
7918 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
7919 {
7920 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
7921 return VINF_EM_RAW_TO_R3;
7922 }
7923
7924 return VINF_SUCCESS;
7925}
7926
7927
7928/**
7929 * Converts any TRPM trap into a pending HM event. This is typically used when
7930 * entering from ring-3 (not longjmp returns).
7931 *
7932 * @param pVCpu The cross context virtual CPU structure.
7933 */
7934static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
7935{
7936 Assert(TRPMHasTrap(pVCpu));
7937 Assert(!pVCpu->hm.s.Event.fPending);
7938
7939 uint8_t uVector;
7940 TRPMEVENT enmTrpmEvent;
7941 RTGCUINT uErrCode;
7942 RTGCUINTPTR GCPtrFaultAddress;
7943 uint8_t cbInstr;
7944
7945 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
7946 AssertRC(rc);
7947
7948     /* See Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
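     /* Note: the vector, type and error-code-valid bits are laid out identically in the VM-entry and
        VM-exit interruption-information fields, so the VMX_EXIT_INT_INFO_XXX constants are used here. */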
7949 uint32_t u32IntInfo = uVector | VMX_EXIT_INT_INFO_VALID;
7950 if (enmTrpmEvent == TRPM_TRAP)
7951 {
7952 /** @todo r=ramshankar: TRPM currently offers no way to determine a \#DB that was
7953 * generated using INT1 (ICEBP). */
7954 switch (uVector)
7955 {
7956 case X86_XCPT_NMI:
7957 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_NMI << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7958 break;
7959
7960 case X86_XCPT_BP:
7961 case X86_XCPT_OF:
7962 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7963 break;
7964
7965 case X86_XCPT_PF:
7966 case X86_XCPT_DF:
7967 case X86_XCPT_TS:
7968 case X86_XCPT_NP:
7969 case X86_XCPT_SS:
7970 case X86_XCPT_GP:
7971 case X86_XCPT_AC:
7972 u32IntInfo |= VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
7973 RT_FALL_THRU();
7974 default:
7975 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7976 break;
7977 }
7978 }
7979 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
7980 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_EXT_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7981 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
7982 {
7983 switch (uVector)
7984 {
7985 case X86_XCPT_BP:
7986 case X86_XCPT_OF:
7987 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7988 break;
7989
7990 default:
7991 Assert(uVector == X86_XCPT_DB);
7992 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7993 break;
7994 }
7995 }
7996 else
7997 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
7998
7999 rc = TRPMResetTrap(pVCpu);
8000 AssertRC(rc);
8001 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
8002 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
8003
8004 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
8005}
8006
8007
8008/**
8009 * Converts the pending HM event into a TRPM trap.
8010 *
8011 * @param pVCpu The cross context virtual CPU structure.
8012 */
8013static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
8014{
8015 Assert(pVCpu->hm.s.Event.fPending);
8016
8017 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
8018 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
8019 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVCpu->hm.s.Event.u64IntInfo);
8020 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
8021
8022 /* If a trap was already pending, we did something wrong! */
8023 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
8024
8025 /** @todo Use HMVmxEventToTrpmEventType() later. */
8026 TRPMEVENT enmTrapType;
8027 switch (uVectorType)
8028 {
8029 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
8030 enmTrapType = TRPM_HARDWARE_INT;
8031 break;
8032
8033 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
8034 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
8035 enmTrapType = TRPM_TRAP;
8036 break;
8037
8038 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT: /* #DB (INT1/ICEBP). */
8039 Assert(uVector == X86_XCPT_DB);
8040 enmTrapType = TRPM_SOFTWARE_INT;
8041 break;
8042
8043 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP (INT3) and #OF (INTO) */
8044 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8045 enmTrapType = TRPM_SOFTWARE_INT;
8046 break;
8047
8048 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
8049 enmTrapType = TRPM_SOFTWARE_INT;
8050 break;
8051
8052 default:
8053 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
8054 enmTrapType = TRPM_32BIT_HACK;
8055 break;
8056 }
8057
8058 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
8059
8060 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
8061 AssertRC(rc);
8062
8063 if (fErrorCodeValid)
8064 TRPMSetErrorCode(pVCpu, uErrorCode);
8065
8066 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
8067 && uVector == X86_XCPT_PF)
8068 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
8069 else if (enmTrapType == TRPM_SOFTWARE_INT)
8070 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
8071
8072 /* We're now done converting the pending event. */
8073 pVCpu->hm.s.Event.fPending = false;
8074}
8075
8076
8077/**
8078 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
8079 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
8080 *
8081 * @param pVCpu The cross context virtual CPU structure.
8082 * @param pVmcsInfo The VMCS info. object.
8083 */
8084static void hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
8085{
8086 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
8087 {
8088 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
8089 {
8090 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
8091 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8092 AssertRC(rc);
8093 }
8094     } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
8095}
8096
8097
8098/**
8099 * Clears the interrupt-window exiting control in the VMCS.
8100 *
8101 * @param pVmcsInfo The VMCS info. object.
8102 */
8103DECLINLINE(int) hmR0VmxClearIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
8104{
8105 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
8106 {
8107 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
8108 return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8109 }
8110 return VINF_SUCCESS;
8111}
8112
8113
8114/**
8115 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
8116 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
8117 *
8118 * @param pVCpu The cross context virtual CPU structure.
8119 * @param pVmcsInfo The VMCS info. object.
8120 */
8121static void hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
8122{
8123 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
8124 {
8125 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
8126 {
8127 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
8128 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8129 AssertRC(rc);
8130 Log4Func(("Setup NMI-window exiting\n"));
8131 }
8132 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
8133}
8134
8135
8136/**
8137 * Clears the NMI-window exiting control in the VMCS.
8138 *
8139 * @param pVmcsInfo The VMCS info. object.
8140 */
8141DECLINLINE(int) hmR0VmxClearNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
8142{
8143 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
8144 {
8145 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
8146 return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8147 }
8148 return VINF_SUCCESS;
8149}
8150
8151
8152/**
8153 * Does the necessary state syncing before returning to ring-3 for any reason
8154 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
8155 *
8156 * @returns VBox status code.
8157 * @param pVCpu The cross context virtual CPU structure.
8158 * @param fImportState Whether to import the guest state from the VMCS back
8159 * to the guest-CPU context.
8160 *
8161 * @remarks No-long-jmp zone!!!
8162 */
8163static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState)
8164{
8165 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8166 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8167
8168 RTCPUID idCpu = RTMpCpuId();
8169 Log4Func(("HostCpuId=%u\n", idCpu));
8170
8171 /*
8172 * !!! IMPORTANT !!!
8173 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
8174 */
8175
8176 /* Save the guest state if necessary. */
8177 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8178 if (fImportState)
8179 {
8180 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8181 AssertRCReturn(rc, rc);
8182 }
8183
8184 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
8185 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
8186 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8187
8188 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
8189#ifdef VBOX_STRICT
8190 if (CPUMIsHyperDebugStateActive(pVCpu))
8191 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
8192#endif
8193 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
8194 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
8195 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
8196
8197#if HC_ARCH_BITS == 64
8198 /* Restore host-state bits that VT-x only restores partially. */
8199 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
8200 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
8201 {
8202 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
8203 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
8204 }
8205 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
8206#endif
8207
8208 /* Restore the lazy host MSRs as we're leaving VT-x context. */
8209 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
8210 {
8211 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
8212 if (!fImportState)
8213 {
8214 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
8215 AssertRCReturn(rc, rc);
8216 }
8217 hmR0VmxLazyRestoreHostMsrs(pVCpu);
8218 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
8219 }
8220 else
8221 pVCpu->hm.s.vmx.fLazyMsrs = 0;
8222
8223 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
8224 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
8225
8226 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
8227 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
8228 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
8229 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
8230 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
8231 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
8232 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
8233 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
8234 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
8235
8236 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
8237
8238 /** @todo This partially defeats the purpose of having preemption hooks.
8239 * The problem is, deregistering the hooks should be moved to a place that
8240 * lasts until the EMT is about to be destroyed not everytime while leaving HM
8241 * context.
8242 */
8243 int rc = hmR0VmxClearVmcs(pVmcsInfo);
8244 AssertRCReturn(rc, rc);
8245
8246 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
8247 NOREF(idCpu);
8248 return VINF_SUCCESS;
8249}
8250
8251
8252/**
8253 * Leaves the VT-x session.
8254 *
8255 * @returns VBox status code.
8256 * @param pVCpu The cross context virtual CPU structure.
8257 *
8258 * @remarks No-long-jmp zone!!!
8259 */
8260static int hmR0VmxLeaveSession(PVMCPU pVCpu)
8261{
8262 HM_DISABLE_PREEMPT(pVCpu);
8263 HMVMX_ASSERT_CPU_SAFE(pVCpu);
8264 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8265 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8266
8267 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
8268 and done this from the VMXR0ThreadCtxCallback(). */
8269 if (!pVCpu->hm.s.fLeaveDone)
8270 {
8271 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
8272 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
8273 pVCpu->hm.s.fLeaveDone = true;
8274 }
8275 Assert(!pVCpu->cpum.GstCtx.fExtrn);
8276
8277 /*
8278 * !!! IMPORTANT !!!
8279 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
8280 */
8281
8282 /* Deregister hook now that we've left HM context before re-enabling preemption. */
8283 /** @todo Deregistering here means we need to VMCLEAR always
8284      *        (longjmp/exit-to-r3) in VT-x, which is not efficient; eliminate the need
8285 * for calling VMMR0ThreadCtxHookDisable here! */
8286 VMMR0ThreadCtxHookDisable(pVCpu);
8287
8288 /* Leave HM context. This takes care of local init (term). */
8289 int rc = HMR0LeaveCpu(pVCpu);
8290
8291 HM_RESTORE_PREEMPT();
8292 return rc;
8293}
8294
8295
8296/**
8297 * Does the necessary state syncing before doing a longjmp to ring-3.
8298 *
8299 * @returns VBox status code.
8300 * @param pVCpu The cross context virtual CPU structure.
8301 *
8302 * @remarks No-long-jmp zone!!!
8303 */
8304DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu)
8305{
8306 return hmR0VmxLeaveSession(pVCpu);
8307}
8308
8309
8310/**
8311 * Takes necessary actions before going back to ring-3.
8312 *
8313 * An action requires us to go back to ring-3. This function does the necessary
8314 * steps before we can safely return to ring-3. This is not the same as longjmps
8315 * to ring-3, this is voluntary and prepares the guest so it may continue
8316 * executing outside HM (recompiler/IEM).
8317 *
8318 * @returns VBox status code.
8319 * @param pVCpu The cross context virtual CPU structure.
8320 * @param rcExit The reason for exiting to ring-3. Can be
8321 * VINF_VMM_UNKNOWN_RING3_CALL.
8322 */
8323static int hmR0VmxExitToRing3(PVMCPU pVCpu, VBOXSTRICTRC rcExit)
8324{
8325 Assert(pVCpu);
8326 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8327
8328 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
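     /* If the VMCS pointer went bad, capture diagnostics (current VMCS address, VMCS revision, CPU IDs)
        for ring-3 error reporting. */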
8329 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
8330 {
8331 VMXGetCurrentVmcs(&pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs);
8332 pVCpu->hm.s.vmx.LastError.u32VmcsRev = *(uint32_t *)pVmcsInfo->pvVmcs;
8333 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
8334 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
8335 }
8336
8337     /* Please, no longjmps here (any logging could trigger a flush that jumps back to ring-3). NO LOGGING BEFORE THIS POINT! */
8338 VMMRZCallRing3Disable(pVCpu);
8339 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
8340
8341 /*
8342 * Convert any pending HM events back to TRPM due to premature exits to ring-3.
8343      * We need to do this only on returns to ring-3 and not for longjmps to ring-3.
8344 *
8345 * This is because execution may continue from ring-3 and we would need to inject
8346 * the event from there (hence place it back in TRPM).
8347 */
8348 if (pVCpu->hm.s.Event.fPending)
8349 {
8350 hmR0VmxPendingEventToTrpmTrap(pVCpu);
8351 Assert(!pVCpu->hm.s.Event.fPending);
8352
8353 /* Clear the events from the VMCS. */
8354 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
8355 AssertRCReturn(rc, rc);
8356 }
8357#ifdef VBOX_STRICT
8358 else
8359 {
8360 /*
8361 * Ensure we don't accidentally clear a pending HM event without clearing the VMCS.
8362 * This can be pretty hard to debug otherwise, interrupts might get injected twice
8363 * occasionally, see @bugref{9180#c42}.
8364 */
8365 uint32_t uEntryIntInfo;
8366 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo);
8367 AssertRC(rc);
8368 Assert(!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo));
8369 }
8370#endif
8371
8372 /*
8373 * Clear the interrupt-window and NMI-window VMCS controls as we could have got
8374 * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits
8375 * (e.g. TPR below threshold).
8376 */
8377 int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
8378 rc |= hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
8379 AssertRCReturn(rc, rc);
8380
8381     /* If we're emulating an instruction, we shouldn't have any TRPM traps pending,
8382 and if we're injecting an event we should have a TRPM trap pending. */
8383 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
8384#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
8385 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
8386#endif
8387
8388 /* Save guest state and restore host state bits. */
8389 rc = hmR0VmxLeaveSession(pVCpu);
8390 AssertRCReturn(rc, rc);
8391 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
8392
8393 /* Thread-context hooks are unregistered at this point!!! */
8394
8395 /* Sync recompiler state. */
8396 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
8397 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
8398 | CPUM_CHANGED_LDTR
8399 | CPUM_CHANGED_GDTR
8400 | CPUM_CHANGED_IDTR
8401 | CPUM_CHANGED_TR
8402 | CPUM_CHANGED_HIDDEN_SEL_REGS);
8403 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
8404 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
8405 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
8406
8407 Assert(!pVCpu->hm.s.fClearTrapFlag);
8408
8409     /* Update the exit-to-ring-3 reason. */
8410 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
8411
8412 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
8413 if ( rcExit != VINF_EM_RAW_INTERRUPT
8414 || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8415 {
8416 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
8417 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8418 }
8419
8420 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
8421
8422 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
8423 VMMRZCallRing3RemoveNotification(pVCpu);
8424 VMMRZCallRing3Enable(pVCpu);
8425
8426 return rc;
8427}
8428
8429
8430/**
8431 * VMMRZCallRing3() callback wrapper which saves the guest state before we
8432 * longjump to ring-3 and possibly get preempted.
8433 *
8434 * @returns VBox status code.
8435 * @param pVCpu The cross context virtual CPU structure.
8436 * @param enmOperation The operation causing the ring-3 longjump.
8437 * @param pvUser User argument, currently unused, NULL.
8438 */
8439static DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
8440{
8441 RT_NOREF(pvUser);
8442 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
8443 {
8444 /*
8445 * !!! IMPORTANT !!!
8446 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
8447 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
8448 */
8449 VMMRZCallRing3RemoveNotification(pVCpu);
8450 VMMRZCallRing3Disable(pVCpu);
8451 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
8452 RTThreadPreemptDisable(&PreemptState);
8453
8454 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8455 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8456 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
8457 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
8458
8459#if HC_ARCH_BITS == 64
8460 /* Restore host-state bits that VT-x only restores partially. */
8461 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
8462 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
8463 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
8464 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
8465#endif
8466
8467 /* Restore the lazy host MSRs as we're leaving VT-x context. */
8468 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
8469 hmR0VmxLazyRestoreHostMsrs(pVCpu);
8470
8471 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
8472 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
8473 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
8474
8475 /* Clear the current VMCS data back to memory. */
8476 hmR0VmxClearVmcs(pVmcsInfo);
8477
8478 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
8479 VMMR0ThreadCtxHookDisable(pVCpu);
8480 HMR0LeaveCpu(pVCpu);
8481 RTThreadPreemptRestore(&PreemptState);
8482 return VINF_SUCCESS;
8483 }
8484
8485 Assert(pVCpu);
8486 Assert(pvUser);
8487 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8488 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8489
8490 VMMRZCallRing3Disable(pVCpu);
8491 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8492
8493 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
8494
8495 int rc = hmR0VmxLongJmpToRing3(pVCpu);
8496 AssertRCReturn(rc, rc);
8497
8498 VMMRZCallRing3Enable(pVCpu);
8499 return VINF_SUCCESS;
8500}
8501
8502
8503/**
8504 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
8505 * stack.
8506 *
8507 * @returns Strict VBox status code (i.e. informational status codes too).
8508 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
8509 * @param pVCpu The cross context virtual CPU structure.
8510 * @param uValue The value to push to the guest stack.
8511 */
8512static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPU pVCpu, uint16_t uValue)
8513{
8514 /*
8515 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
8516 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
8517 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
8518 */
8519 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8520 if (pCtx->sp == 1)
8521 return VINF_EM_RESET;
8522 pCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
8523 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
8524 AssertRC(rc);
8525 return rc;
8526}
8527
8528
8529/**
8530 * Injects an event into the guest upon VM-entry by updating the relevant fields
8531 * in the VM-entry area in the VMCS.
8532 *
8533 * @returns Strict VBox status code (i.e. informational status codes too).
8534 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
8535 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
8536 *
8537 * @param pVCpu The cross context virtual CPU structure.
8538 * @param pVmxTransient The VMX-transient structure.
8539 * @param pEvent The event being injected.
8540 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state.
8541  *                      This will be updated if necessary. This cannot
8542 * be NULL.
8543 * @param fStepping Whether we're single-stepping guest execution and
8544 * should return VINF_EM_DBG_STEPPED if the event is
8545 * injected directly (registers modified by us, not by
8546 * hardware on VM-entry).
8547 */
8548static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping,
8549 uint32_t *pfIntrState)
8550{
8551 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
8552 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
8553 Assert(pfIntrState);
8554
8555 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8556 uint32_t u32IntInfo = pEvent->u64IntInfo;
8557 uint32_t const u32ErrCode = pEvent->u32ErrCode;
8558 uint32_t const cbInstr = pEvent->cbInstr;
8559 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
8560 uint32_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
8561 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
8562
8563#ifdef VBOX_STRICT
8564 /*
8565 * Validate the error-code-valid bit for hardware exceptions.
8566 * No error codes for exceptions in real-mode.
8567 *
8568 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
8569 */
8570 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
8571 && !CPUMIsGuestInRealModeEx(pCtx))
8572 {
8573 switch (uVector)
8574 {
8575 case X86_XCPT_PF:
8576 case X86_XCPT_DF:
8577 case X86_XCPT_TS:
8578 case X86_XCPT_NP:
8579 case X86_XCPT_SS:
8580 case X86_XCPT_GP:
8581 case X86_XCPT_AC:
8582 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
8583 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
8584 RT_FALL_THRU();
8585 default:
8586 break;
8587 }
8588 }
8589
8590 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
8591 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
8592 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
8593#endif
8594
8595 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
8596
8597 /*
8598 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
8599 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
8600 * interrupt handler in the (real-mode) guest.
8601 *
8602 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
8603 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
8604 */
8605 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
8606 {
8607 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
8608 {
8609 /*
8610 * For CPUs with unrestricted guest execution enabled and with the guest
8611 * in real-mode, we must not set the deliver-error-code bit.
8612 *
8613 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
8614 */
8615 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
8616 }
8617 else
8618 {
8619 PVM pVM = pVCpu->CTX_SUFF(pVM);
8620 Assert(PDMVmmDevHeapIsEnabled(pVM));
8621 Assert(pVM->hm.s.vmx.pRealModeTSS);
8622 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
8623
8624 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
8625 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8626 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
8627 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
8628 AssertRCReturn(rc2, rc2);
8629
8630 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
8631 size_t const cbIdtEntry = sizeof(X86IDTR16);
8632 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
8633 {
8634 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
8635 if (uVector == X86_XCPT_DF)
8636 return VINF_EM_RESET;
8637
8638 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
8639 No error codes for exceptions in real-mode. */
8640 if (uVector == X86_XCPT_GP)
8641 {
8642 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
8643 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
8644 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
8645 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
8646 HMEVENT EventXcptDf;
8647 RT_ZERO(EventXcptDf);
8648 EventXcptDf.u64IntInfo = uXcptDfInfo;
8649 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptDf, fStepping, pfIntrState);
8650 }
8651
8652 /*
8653 * If we're injecting an event with no valid IDT entry, inject a #GP.
8654 * No error codes for exceptions in real-mode.
8655 *
8656 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
8657 */
8658 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
8659 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
8660 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
8661 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
8662 HMEVENT EventXcptGp;
8663 RT_ZERO(EventXcptGp);
8664 EventXcptGp.u64IntInfo = uXcptGpInfo;
8665 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptGp, fStepping, pfIntrState);
8666 }
8667
8668 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
8669 uint16_t uGuestIp = pCtx->ip;
8670 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
8671 {
8672 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8673 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
8674 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
8675 }
8676 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
8677 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
8678
8679 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
8680 X86IDTR16 IdtEntry;
8681 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
8682 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
8683 AssertRCReturn(rc2, rc2);
8684
8685 /* Construct the stack frame for the interrupt/exception handler. */
8686 VBOXSTRICTRC rcStrict;
8687 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
8688 if (rcStrict == VINF_SUCCESS)
8689 {
8690 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
8691 if (rcStrict == VINF_SUCCESS)
8692 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
8693 }
8694
8695 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
8696 if (rcStrict == VINF_SUCCESS)
8697 {
8698 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
8699 pCtx->rip = IdtEntry.offSel;
8700 pCtx->cs.Sel = IdtEntry.uSel;
8701 pCtx->cs.ValidSel = IdtEntry.uSel;
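                 /* Real-mode segment base = selector * 16; cbIdtEntry is sizeof(X86IDTR16) == 4, so the
                    shift below is equivalent to shifting by 4. */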
8702 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
8703 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
8704 && uVector == X86_XCPT_PF)
8705 pCtx->cr2 = GCPtrFault;
8706
8707 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
8708 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8709 | HM_CHANGED_GUEST_RSP);
8710
8711 /*
8712 * If we delivered a hardware exception (other than an NMI) and if there was
8713 * block-by-STI in effect, we should clear it.
8714 */
8715 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
8716 {
8717 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
8718 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
8719 Log4Func(("Clearing inhibition due to STI\n"));
8720 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
8721 }
8722
8723 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
8724 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
8725
8726 /*
8727 * The event has been truly dispatched to the guest. Mark it as no longer pending so
8728 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
8729 */
8730 pVCpu->hm.s.Event.fPending = false;
8731
8732 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
8733 if (fStepping)
8734 rcStrict = VINF_EM_DBG_STEPPED;
8735 }
8736 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8737 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8738 return rcStrict;
8739 }
8740 }
8741
8742 /*
8743 * Validate.
8744 */
8745 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
8746 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
8747
8748 /*
8749 * Inject the event into the VMCS.
8750 */
8751 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
8752 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
8753 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
8754 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
8755 AssertRCReturn(rc, rc);
8756
8757 /*
8758 * Update guest CR2 if this is a page-fault.
8759 */
8760 if ( VMX_ENTRY_INT_INFO_TYPE(u32IntInfo) == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
8761 && uVector == X86_XCPT_PF)
8762 pCtx->cr2 = GCPtrFault;
8763
8764 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
8765 return VINF_SUCCESS;
8766}
8767
8768
8769/**
8770 * Evaluates the event to be delivered to the guest and sets it as the pending
8771 * event.
8772 *
8773 * @returns Strict VBox status code (i.e. informational status codes too).
8774 * @param pVCpu The cross context virtual CPU structure.
8775 * @param pVmxTransient The VMX-transient structure.
8776 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
8777 */
8778static VBOXSTRICTRC hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState)
8779{
8780 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8781 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8782
8783 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
8784 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pVmcsInfo);
8785 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
8786 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
8787 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
8788
8789 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
8790 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
8791 Assert(!fBlockSti || pCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
8792 Assert(!TRPMHasTrap(pVCpu));
8793 Assert(pfIntrState);
8794
8795 *pfIntrState = fIntrState;
8796
8797 /*
8798 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
8799 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
8800 */
8801 /** @todo SMI. SMIs take priority over NMIs. */
8802 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
8803 {
8804 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
8805 if ( !pVCpu->hm.s.Event.fPending
8806 && !fBlockNmi
8807 && !fBlockSti
8808 && !fBlockMovSS)
8809 {
8810#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
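             /* If the nested hypervisor intercepts NMIs (NMI-exiting pin control), deliver an NMI VM-exit
                to it via IEM rather than injecting the NMI into the nested-guest ourselves. */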
8811 if ( pVmxTransient->fIsNestedGuest
8812 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_NMI_EXIT))
8813 {
8814 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitNmi(pVCpu);
8815 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8816 return rcStrict;
8817 }
8818#endif
8819 hmR0VmxSetPendingXcptNmi(pVCpu);
8820 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
8821 Log4Func(("Pending NMI\n"));
8822 }
8823 else
8824 hmR0VmxSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
8825 }
8826 /*
8827 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
8828 * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
8829 */
8830 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
8831 && !pVCpu->hm.s.fSingleInstruction)
8832 {
8833 Assert(!DBGFIsStepping(pVCpu));
8834 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
8835 AssertRCReturn(rc, rc);
8836 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
8837 if ( !pVCpu->hm.s.Event.fPending
8838 && !fBlockInt
8839 && !fBlockSti
8840 && !fBlockMovSS)
8841 {
8842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8843 if ( pVmxTransient->fIsNestedGuest
8844 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
8845 {
8846 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0/* uVector */, true /* fIntPending */);
8847 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8848 return rcStrict;
8849 }
8850#endif
8851 uint8_t u8Interrupt;
8852 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
8853 if (RT_SUCCESS(rc))
8854 {
8855#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8856 if ( pVmxTransient->fIsNestedGuest
8857 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
8858 && CPUMIsGuestVmxExitCtlsSet(pVCpu, pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
8859 {
8860 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
8861 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8862 return rcStrict;
8863 }
8864#endif
8865 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);
8866 Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt));
8867 }
8868 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
8869 {
8870 if ( !pVmxTransient->fIsNestedGuest
8871 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
8872 hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
8873 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
8874
8875 /*
8876 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
8877 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
8878 * need to re-set this force-flag here.
8879 */
8880 }
8881 else
8882 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
8883 }
8884 else
8885 hmR0VmxSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
8886 }
8887
8888 return VINF_SUCCESS;
8889}
8890
8891
8892/**
8893 * Injects any pending events into the guest if the guest is in a state to
8894 * receive them.
8895 *
8896 * @returns Strict VBox status code (i.e. informational status codes too).
8897 * @param pVCpu The cross context virtual CPU structure.
8898 * @param pVmxTransient The VMX-transient structure.
8899 * @param fIntrState The VT-x guest-interruptibility state.
8900 * @param fStepping Whether we are single-stepping the guest using the
8901 * hypervisor debugger and should return
8902 * VINF_EM_DBG_STEPPED if the event was dispatched
8903 * directly.
8904 */
8905static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping)
8906{
8907 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8908 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8909
8910 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
8911 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
8912
8913 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
8914 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
8915 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
8916 Assert(!TRPMHasTrap(pVCpu));
8917
8918 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
8919 if (pVCpu->hm.s.Event.fPending)
8920 {
8921 /*
8922 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
8923 * pending even while injecting an event and in this case, we want a VM-exit as soon as
8924 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
8925 *
8926 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
8927 */
8928 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
8929#ifdef VBOX_STRICT
8930 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
8931 {
8932 bool const fBlockInt = !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
8933 Assert(!fBlockInt);
8934 Assert(!fBlockSti);
8935 Assert(!fBlockMovSS);
8936 }
8937 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
8938 {
8939 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
8940 Assert(!fBlockSti);
8941 Assert(!fBlockMovSS);
8942 Assert(!fBlockNmi);
8943 }
8944#endif
8945 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
8946 uIntType));
8947
8948 /*
8949 * Inject the event and get any changes to the guest-interruptibility state.
8950 *
8951 * The guest-interruptibility state may need to be updated if we inject the event
8952 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
8953 */
8954 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState);
8955 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
8956
8957 /*
8958 * If we are executing a nested-guest make sure that we should intercept subsequent
8959 * events. The one we are injecting might be part of VM-entry.
8960 */
8961 if (pVmxTransient->fIsNestedGuest)
8962 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
8963
8964 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
8965 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
8966 else
8967 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
8968 }
8969
8970 /*
8971 * Update the guest-interruptibility state.
8972 *
8973 * This is required for the real-on-v86 software interrupt injection case above, as well as
8974 * updates to the guest state from ring-3 or IEM/REM.
8975 */
8976 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
8977 AssertRCReturn(rc, rc);
8978
8979 /*
8980 * There's no need to clear the VM-entry interruption-information field here if we're not
8981 * injecting anything. VT-x clears the valid bit on every VM-exit.
8982 *
8983 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
8984 */
8985
8986 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
8987 NOREF(fBlockMovSS); NOREF(fBlockSti);
8988 return rcStrict;
8989}
8990
8991
8992/**
8993 * Enters the VT-x session.
8994 *
8995 * @returns VBox status code.
8996 * @param pVCpu The cross context virtual CPU structure.
8997 */
8998VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu)
8999{
9000 AssertPtr(pVCpu);
9001 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
9002 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9003
9004 LogFlowFunc(("pVCpu=%p\n", pVCpu));
9005 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
9006 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
9007
9008#ifdef VBOX_STRICT
9009 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
9010 RTCCUINTREG uHostCR4 = ASMGetCR4();
9011 if (!(uHostCR4 & X86_CR4_VMXE))
9012 {
9013 LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
9014 return VERR_VMX_X86_CR4_VMXE_CLEARED;
9015 }
9016#endif
9017
9018 /*
9019 * Load the appropriate VMCS as the current and active one.
9020 */
9021 PVMXVMCSINFO pVmcsInfo;
9022 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
9023 if (!fInNestedGuestMode)
9024 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo;
9025 else
9026 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
9027 int rc = hmR0VmxLoadVmcs(pVmcsInfo);
9028 if (RT_SUCCESS(rc))
9029 {
9030 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = fInNestedGuestMode;
9031 pVCpu->hm.s.fLeaveDone = false;
9032 Log4Func(("Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
9033
9034 /*
9035 * Do the EMT scheduled L1D flush here if needed.
9036 */
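        /* IA32_FLUSH_CMD with the L1D bit set writes back and invalidates the L1 data cache
           (L1TF mitigation); hmR0MdsClear() performs the corresponding CPU-buffer clearing for
           the MDS mitigation. */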
9037 if (pVCpu->CTX_SUFF(pVM)->hm.s.fL1dFlushOnSched)
9038 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
9039 else if (pVCpu->CTX_SUFF(pVM)->hm.s.fMdsClearOnSched)
9040 hmR0MdsClear();
9041 }
9042 return rc;
9043}
9044
9045
9046/**
9047 * The thread-context callback (only on platforms which support it).
9048 *
9049 * @param enmEvent The thread-context event.
9050 * @param pVCpu The cross context virtual CPU structure.
9051 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
9052 * @thread EMT(pVCpu)
9053 */
9054VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
9055{
9056 NOREF(fGlobalInit);
9057
9058 switch (enmEvent)
9059 {
9060 case RTTHREADCTXEVENT_OUT:
9061 {
9062 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9063 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
9064 VMCPU_ASSERT_EMT(pVCpu);
9065
9066 /* No longjmps (logger flushes, locks) in this fragile context. */
9067 VMMRZCallRing3Disable(pVCpu);
9068 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
9069
9070 /* Restore host-state (FPU, debug etc.) */
9071 if (!pVCpu->hm.s.fLeaveDone)
9072 {
9073 /*
9074 * Do -not- import the guest-state here as we might already be in the middle of importing
9075 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState().
9076 */
9077 hmR0VmxLeave(pVCpu, false /* fImportState */);
9078 pVCpu->hm.s.fLeaveDone = true;
9079 }
9080
9081 /* Leave HM context, takes care of local init (term). */
9082 int rc = HMR0LeaveCpu(pVCpu);
9083 AssertRC(rc);
9084
9085 /* Restore longjmp state. */
9086 VMMRZCallRing3Enable(pVCpu);
9087 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
9088 break;
9089 }
9090
9091 case RTTHREADCTXEVENT_IN:
9092 {
9093 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9094 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
9095 VMCPU_ASSERT_EMT(pVCpu);
9096
9097 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
9098 VMMRZCallRing3Disable(pVCpu);
9099 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
9100
9101 /* Initialize the bare minimum state required for HM. This takes care of
9102 initializing VT-x if necessary (onlined CPUs, local init etc.) */
9103 int rc = hmR0EnterCpu(pVCpu);
9104 AssertRC(rc);
9105 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
9106 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
9107
9108 /* Load the active VMCS as the current one. */
9109 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
9110 rc = hmR0VmxLoadVmcs(pVmcsInfo);
9111 AssertRC(rc);
9112 Log4Func(("Resumed: Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
9113 pVCpu->hm.s.fLeaveDone = false;
9114
9115 /* Do the EMT scheduled L1D flush if needed. */
9116 if (pVCpu->CTX_SUFF(pVM)->hm.s.fL1dFlushOnSched)
9117 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
9118
9119 /* Restore longjmp state. */
9120 VMMRZCallRing3Enable(pVCpu);
9121 break;
9122 }
9123
9124 default:
9125 break;
9126 }
9127}
9128
9129
9130/**
9131 * Exports the host state into the VMCS host-state area.
9132 * Sets up the VM-exit MSR-load area.
9133 *
9134 * The CPU state will be loaded from these fields on every successful VM-exit.
9135 *
9136 * @returns VBox status code.
9137 * @param pVCpu The cross context virtual CPU structure.
9138 *
9139 * @remarks No-long-jump zone!!!
9140 */
9141static int hmR0VmxExportHostState(PVMCPU pVCpu)
9142{
9143 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9144
9145 int rc = VINF_SUCCESS;
9146 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
9147 {
9148 rc = hmR0VmxExportHostControlRegs();
9149 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9150
9151 rc = hmR0VmxExportHostSegmentRegs(pVCpu);
9152 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9153
9154 rc = hmR0VmxExportHostMsrs(pVCpu);
9155 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9156
9157 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
9158 }
9159 return rc;
9160}
9161
9162
9163/**
9164 * Saves the host state in the VMCS host-state.
9165 *
9166 * @returns VBox status code.
9167 * @param pVCpu The cross context virtual CPU structure.
9168 *
9169 * @remarks No-long-jump zone!!!
9170 */
9171VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu)
9172{
9173 AssertPtr(pVCpu);
9174 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9175
9176 /*
9177 * Export the host state here while entering HM context.
9178 * When thread-context hooks are used, we might get preempted and have to re-save the host
9179 * state but most of the time we won't be, so do it here before we disable interrupts.
9180 */
9181 return hmR0VmxExportHostState(pVCpu);
9182}
9183
9184
9185/**
9186 * Exports the guest state into the VMCS guest-state area.
9187 *
9188 * This will typically be done before VM-entry when the guest-CPU state and the
9189 * VMCS state may potentially be out of sync.
9190 *
9191 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
9192 * VM-entry controls.
9193 * Sets up the appropriate VMX non-root function to execute guest code based on
9194 * the guest CPU mode.
9195 *
9196 * @returns VBox strict status code.
9197 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
9198 * without unrestricted guest execution and the VMMDev is not presently
9199 * mapped (e.g. EFI32).
9200 *
9201 * @param pVCpu The cross context virtual CPU structure.
9202 * @param pVmxTransient The VMX-transient structure.
9203 *
9204 * @remarks No-long-jump zone!!!
9205 */
9206static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
9207{
9208 AssertPtr(pVCpu);
9209 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
9210 LogFlowFunc(("pVCpu=%p\n", pVCpu));
9211
9212 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
9213
9214 /*
9215 * Determine real-on-v86 mode.
9216 * Used when the guest is in real-mode and unrestricted guest execution is not used.
9217 */
9218 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9219 if ( pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
9220 || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
9221        pVmcsInfo->RealMode.fRealOnV86Active = false;
9222 else
9223 {
9224 Assert(!pVmxTransient->fIsNestedGuest);
9225 pVmcsInfo->RealMode.fRealOnV86Active = true;
9226 }
9227
9228 /*
9229 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
9230 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
9231 */
9232 /** @todo r=ramshankar: Move hmR0VmxSelectVMRunHandler inside
9233 * hmR0VmxExportGuestEntryExitCtls and do it conditionally. There shouldn't
9234     *        be a need to evaluate this every time since I'm pretty sure we intercept
9235 * all guest paging mode changes. */
9236 int rc = hmR0VmxSelectVMRunHandler(pVCpu, pVmxTransient);
9237 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9238
9239 rc = hmR0VmxExportGuestEntryExitCtls(pVCpu, pVmxTransient);
9240 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9241
9242 rc = hmR0VmxExportGuestCR0(pVCpu, pVmxTransient);
9243 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9244
9245 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pVmxTransient);
9246 if (rcStrict == VINF_SUCCESS)
9247 { /* likely */ }
9248 else
9249 {
9250 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
9251 return rcStrict;
9252 }
9253
9254 rc = hmR0VmxExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
9255 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9256
9257 rc = hmR0VmxExportGuestMsrs(pVCpu, pVmxTransient);
9258 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9259
9260 rc = hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
9261 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9262
9263 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
9264 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9265
9266 rc = hmR0VmxExportGuestRip(pVCpu);
9267 rc |= hmR0VmxExportGuestRsp(pVCpu);
9268 rc |= hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
9269 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9270
9271 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
9272 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
9273 | HM_CHANGED_GUEST_CR2
9274 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
9275 | HM_CHANGED_GUEST_X87
9276 | HM_CHANGED_GUEST_SSE_AVX
9277 | HM_CHANGED_GUEST_OTHER_XSAVE
9278 | HM_CHANGED_GUEST_XCRx
9279 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
9280 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
9281 | HM_CHANGED_GUEST_TSC_AUX
9282 | HM_CHANGED_GUEST_OTHER_MSRS
9283 | HM_CHANGED_GUEST_HWVIRT /* More accurate PLE handling someday? */
9284 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
9285
9286 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
9287 return rc;
9288}
9289
9290
9291/**
9292 * Exports the state shared between the host and guest into the VMCS.
9293 *
9294 * @param pVCpu The cross context virtual CPU structure.
9295 * @param pVmxTransient The VMX-transient structure.
9296 *
9297 * @remarks No-long-jump zone!!!
9298 */
9299static void hmR0VmxExportSharedState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
9300{
9301 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9302 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9303
9304 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
9305 {
9306 int rc = hmR0VmxExportSharedDebugState(pVCpu, pVmxTransient);
9307 AssertRC(rc);
9308 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
9309
9310        /* Loading shared debug bits might have changed the eflags.TF bit for debugging purposes. */
9311 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
9312 {
9313 rc = hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
9314 AssertRC(rc);
9315 }
9316 }
9317
9318 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
9319 {
9320 hmR0VmxLazyLoadGuestMsrs(pVCpu);
9321 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
9322 }
9323
9324 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
9325 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
9326}
9327
9328
9329/**
9330 * Worker for loading the guest-state bits in the inner VT-x execution loop.
9331 *
9332 * @returns Strict VBox status code (i.e. informational status codes too).
9333 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
9334 * without unrestricted guest execution and the VMMDev is not presently
9335 * mapped (e.g. EFI32).
9336 *
9337 * @param pVCpu The cross context virtual CPU structure.
9338 * @param pVmxTransient The VMX-transient structure.
9339 *
9340 * @remarks No-long-jump zone!!!
9341 */
9342static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
9343{
9344 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
9345 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9346 Assert(VMMR0IsLogFlushDisabled(pVCpu));
9347
9348#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
9349 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
9350#endif
9351
9352 /*
9353     * For many exits it's only RIP that changes, so we try to export it first
9354     * without going through a lot of change-flag checks.
9355 */
9356 VBOXSTRICTRC rcStrict;
9357 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
9358 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
9359 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
9360 {
9361 rcStrict = hmR0VmxExportGuestRip(pVCpu);
9362 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9363 { /* likely */}
9364 else
9365 AssertMsgFailedReturn(("Failed to export guest RIP! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
9366 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
9367 }
9368 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
9369 {
9370 rcStrict = hmR0VmxExportGuestState(pVCpu, pVmxTransient);
9371 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9372 { /* likely */}
9373 else
9374 {
9375 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n",
9376 VBOXSTRICTRC_VAL(rcStrict)));
9377 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9378 return rcStrict;
9379 }
9380 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
9381 }
9382 else
9383 rcStrict = VINF_SUCCESS;
9384
9385#ifdef VBOX_STRICT
9386 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
9387 fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
9388 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
9389 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
9390 ("fCtxChanged=%#RX64\n", fCtxChanged));
9391#endif
9392 return rcStrict;
9393}
9394
9395
9396/**
9397 * Tries to determine what part of the guest-state VT-x has deemed invalid
9398 * and updates the error record fields accordingly.
9399 *
9400 * @return VMX_IGS_* return codes.
9401 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9402 * wrong with the guest state.
9403 *
9404 * @param pVCpu The cross context virtual CPU structure.
9405 * @param pVmcsInfo The VMCS info. object.
9406 *
9407 * @remarks This function assumes our cache of the VMCS controls
9408 *          is valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9409 */
9410static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
9411{
9412#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9413#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9414 uError = (err); \
9415 break; \
9416 } else do { } while (0)
9417
9418 int rc;
9419 PVM pVM = pVCpu->CTX_SUFF(pVM);
9420 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9421 uint32_t uError = VMX_IGS_ERROR;
9422 uint32_t u32Val;
9423 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9424
9425 do
9426 {
9427 /*
9428 * CR0.
9429 */
9430 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9431 uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
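        /* A CR0 bit that is 1 in FIXED0 must be 1 in the guest and a bit that is 0 in FIXED1 must
           be 0, so fSetCr0 is the must-be-one mask and fZapCr0 the may-be-one mask.  E.g. with the
           typical FIXED0=0x80000021/FIXED1=0xffffffff values, PE, NE and PG must normally be set.
           See Intel spec. A.7 "VMX-Fixed Bits in CR0". */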
9432 /* Exceptions for unrestricted guest execution for fixed CR0 bits (PE, PG).
9433 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9434 if (fUnrestrictedGuest)
9435 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
9436
9437 uint32_t u32GuestCr0;
9438 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
9439 AssertRCBreak(rc);
9440 HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
9441 HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
9442 if ( !fUnrestrictedGuest
9443 && (u32GuestCr0 & X86_CR0_PG)
9444 && !(u32GuestCr0 & X86_CR0_PE))
9445 {
9446 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9447 }
9448
9449 /*
9450 * CR4.
9451 */
9452 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9453 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9454
9455 uint32_t u32GuestCr4;
9456 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
9457 AssertRCBreak(rc);
9458 HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
9459 HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
9460
9461 /*
9462 * IA32_DEBUGCTL MSR.
9463 */
9464 uint64_t u64Val;
9465 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9466 AssertRCBreak(rc);
9467 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
9468 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9469 {
9470 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9471 }
9472 uint64_t u64DebugCtlMsr = u64Val;
9473
9474#ifdef VBOX_STRICT
9475 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9476 AssertRCBreak(rc);
9477 Assert(u32Val == pVmcsInfo->u32EntryCtls);
9478#endif
9479 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
9480
9481 /*
9482 * RIP and RFLAGS.
9483 */
9484 uint32_t u32Eflags;
9485#if HC_ARCH_BITS == 64
9486 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9487 AssertRCBreak(rc);
9488 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
9489 if ( !fLongModeGuest
9490 || !pCtx->cs.Attr.n.u1Long)
9491 {
9492 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9493 }
9494 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9495 * must be identical if the "IA-32e mode guest" VM-entry
9496 * control is 1 and CS.L is 1. No check applies if the
9497 * CPU supports 64 linear-address bits. */
9498
9499 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9500 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9501 AssertRCBreak(rc);
9502 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9503 VMX_IGS_RFLAGS_RESERVED);
9504 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9505 u32Eflags = u64Val;
9506#else
9507 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9508 AssertRCBreak(rc);
9509 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
9510 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9511#endif
9512
9513 if ( fLongModeGuest
9514 || ( fUnrestrictedGuest
9515 && !(u32GuestCr0 & X86_CR0_PE)))
9516 {
9517 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9518 }
9519
9520 uint32_t u32EntryInfo;
9521 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9522 AssertRCBreak(rc);
9523 if ( VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
9524 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
9525 {
9526 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9527 }
9528
9529 /*
9530 * 64-bit checks.
9531 */
9532#if HC_ARCH_BITS == 64
9533 if (fLongModeGuest)
9534 {
9535 HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9536 HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9537 }
9538
9539 if ( !fLongModeGuest
9540 && (u32GuestCr4 & X86_CR4_PCIDE))
9541 {
9542 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9543 }
9544
9545 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9546 * 51:32 beyond the processor's physical-address width are 0. */
9547
9548 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
9549 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9550 {
9551 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9552 }
9553
9554 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9555 AssertRCBreak(rc);
9556 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9557
9558 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9559 AssertRCBreak(rc);
9560 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9561#endif
9562
9563 /*
9564 * PERF_GLOBAL MSR.
9565 */
9566 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
9567 {
9568 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9569 AssertRCBreak(rc);
9570 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9571 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9572 }
9573
9574 /*
9575 * PAT MSR.
9576 */
9577 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
9578 {
9579 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9580 AssertRCBreak(rc);
9581 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
9582 for (unsigned i = 0; i < 8; i++)
9583 {
9584 uint8_t u8Val = (u64Val & 0xff);
9585 if ( u8Val != 0 /* UC */
9586 && u8Val != 1 /* WC */
9587 && u8Val != 4 /* WT */
9588 && u8Val != 5 /* WP */
9589 && u8Val != 6 /* WB */
9590 && u8Val != 7 /* UC- */)
9591 {
9592 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9593 }
9594 u64Val >>= 8;
9595 }
9596 }
9597
9598 /*
9599 * EFER MSR.
9600 */
9601 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
9602 {
9603 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9604 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9605 AssertRCBreak(rc);
9606 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9607 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9608 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
9609 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
9610 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9611 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
9612 * iemVmxVmentryCheckGuestState(). */
9613 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9614 || !(u32GuestCr0 & X86_CR0_PG)
9615 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
9616 VMX_IGS_EFER_LMA_LME_MISMATCH);
9617 }
9618
9619 /*
9620 * Segment registers.
9621 */
9622 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9623 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9624 if (!(u32Eflags & X86_EFL_VM))
9625 {
9626 /* CS */
9627 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9628 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9629 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9630 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9631 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9632 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9633 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9634 /* CS cannot be loaded with NULL in protected mode. */
9635 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9636 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9637 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9638 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9639 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9640 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9641 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9642 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9643 else
9644 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9645
9646 /* SS */
9647 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9648 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9649 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9650 if ( !(pCtx->cr0 & X86_CR0_PE)
9651 || pCtx->cs.Attr.n.u4Type == 3)
9652 {
9653 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9654 }
9655 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9656 {
9657 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9658 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9659 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9660 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9661 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9662 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9663 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9664 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9665 }
9666
9667 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSReg(). */
9668 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9669 {
9670 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9671 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9672 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9673 || pCtx->ds.Attr.n.u4Type > 11
9674 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9675 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9676 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9677 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9678 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9679 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9680 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9681 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9682 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9683 }
9684 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9685 {
9686 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9687 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9688 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9689 || pCtx->es.Attr.n.u4Type > 11
9690 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9691 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9692 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9693 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9694 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9695 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9696 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9697 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9698 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9699 }
9700 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9701 {
9702 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9703 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9704 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9705 || pCtx->fs.Attr.n.u4Type > 11
9706 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9707 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9708 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9709 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9710 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9711 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9712 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9713 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9714 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9715 }
9716 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9717 {
9718 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9719 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9720 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9721 || pCtx->gs.Attr.n.u4Type > 11
9722 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9723 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9724 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9725 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9726 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9727 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9728 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9729 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9730 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9731 }
9732 /* 64-bit capable CPUs. */
9733#if HC_ARCH_BITS == 64
9734 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9735 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9736 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9737 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9738 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9739 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
9740 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9741 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
9742 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9743 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
9744 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9745#endif
9746 }
9747 else
9748 {
9749 /* V86 mode checks. */
9750 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
9751 if (pVmcsInfo->RealMode.fRealOnV86Active)
9752 {
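                /* Real-on-v86: all segment attributes are forced to 0xf3, i.e. a present, DPL-3,
                   accessed read/write data segment. */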
9753 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9754 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9755 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9756 }
9757 else
9758 {
9759 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9760 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9761 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9762 }
9763
9764 /* CS */
9765 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9766 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9767 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9768 /* SS */
9769 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9770 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9771 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9772 /* DS */
9773 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9774 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9775 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9776 /* ES */
9777 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9778 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9779 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9780 /* FS */
9781 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9782 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9783 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9784 /* GS */
9785 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9786 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9787 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9788 /* 64-bit capable CPUs. */
9789#if HC_ARCH_BITS == 64
9790 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9791 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9792 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9793 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9794 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9795 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
9796 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9797 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
9798 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9799 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
9800 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9801#endif
9802 }
9803
9804 /*
9805 * TR.
9806 */
9807 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9808 /* 64-bit capable CPUs. */
9809#if HC_ARCH_BITS == 64
9810 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9811#endif
9812 if (fLongModeGuest)
9813 {
9814 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9815 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9816 }
9817 else
9818 {
9819 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9820 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9821 VMX_IGS_TR_ATTR_TYPE_INVALID);
9822 }
9823 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9824 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9825 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9826 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9827 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9828 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9829 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9830 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9831
9832 /*
9833 * GDTR and IDTR.
9834 */
9835#if HC_ARCH_BITS == 64
9836 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9837 AssertRCBreak(rc);
9838 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9839
9840 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9841 AssertRCBreak(rc);
9842 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9843#endif
9844
9845 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9846 AssertRCBreak(rc);
9847 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9848
9849 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9850 AssertRCBreak(rc);
9851 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9852
9853 /*
9854 * Guest Non-Register State.
9855 */
9856 /* Activity State. */
9857 uint32_t u32ActivityState;
9858 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9859 AssertRCBreak(rc);
9860 HMVMX_CHECK_BREAK( !u32ActivityState
9861 || (u32ActivityState & RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
9862 VMX_IGS_ACTIVITY_STATE_INVALID);
9863 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9864 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9865 uint32_t u32IntrState;
9866 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
9867 AssertRCBreak(rc);
9868 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
9869 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
9870 {
9871 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9872 }
9873
9874 /** @todo Activity state and injecting interrupts. Left as a todo since we
9875         *        currently don't use any activity state other than ACTIVE. */
9876
9877 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
9878 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9879
9880 /* Guest interruptibility-state. */
9881 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9882 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
9883 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
9884 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9885 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9886 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
9887 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9888 if (VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo))
9889 {
9890 if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
9891 {
9892 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
9893 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
9894 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9895 }
9896 else if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
9897 {
9898 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
9899 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9900 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
9901 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9902 }
9903 }
9904 /** @todo Assumes the processor is not in SMM. */
9905 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
9906 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9907 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
9908 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
9909 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9910 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
9911 && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
9912 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
9913 {
9914 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI),
9915 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9916 }
9917
9918 /* Pending debug exceptions. */
9919#if HC_ARCH_BITS == 64
9920 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
9921 AssertRCBreak(rc);
9922 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9923 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9924 u32Val = u64Val; /* For pending debug exceptions checks below. */
9925#else
9926 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u32Val);
9927 AssertRCBreak(rc);
9928 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9929 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
9930#endif
9931
9932 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
9933 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
9934 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9935 {
9936 if ( (u32Eflags & X86_EFL_TF)
9937 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9938 {
9939 /* Bit 14 is PendingDebug.BS. */
9940 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9941 }
9942 if ( !(u32Eflags & X86_EFL_TF)
9943 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9944 {
9945 /* Bit 14 is PendingDebug.BS. */
9946 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9947 }
9948 }
9949
9950 /* VMCS link pointer. */
9951 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
9952 AssertRCBreak(rc);
9953 if (u64Val != UINT64_C(0xffffffffffffffff))
9954 {
9955 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
9956 /** @todo Bits beyond the processor's physical-address width MBZ. */
9957            /** @todo The 32-bit value located in memory referenced by the value of this field
9958             *        (as a physical address) must contain the processor's VMCS revision ID. */
9959 /** @todo SMM checks. */
9960 }
9961
9962 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
9963 * not using nested paging? */
9964 if ( pVM->hm.s.fNestedPaging
9965 && !fLongModeGuest
9966 && CPUMIsGuestInPAEModeEx(pCtx))
9967 {
9968 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
9969 AssertRCBreak(rc);
9970 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9971
9972 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
9973 AssertRCBreak(rc);
9974 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9975
9976 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
9977 AssertRCBreak(rc);
9978 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9979
9980 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
9981 AssertRCBreak(rc);
9982 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9983 }
9984
9985 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
9986 if (uError == VMX_IGS_ERROR)
9987 uError = VMX_IGS_REASON_NOT_FOUND;
9988 } while (0);
9989
9990 pVCpu->hm.s.u32HMError = uError;
9991 return uError;
9992
9993#undef HMVMX_ERROR_BREAK
9994#undef HMVMX_CHECK_BREAK
9995}
9996
9997
9998/**
9999 * Sets up the APIC-access page for virtualizing APIC access.
10000 *
10001 * This can cause longjmps to ring-3 due to the acquisition of the PGM lock, hence
10002 * this is not done as part of exporting guest state; see @bugref{8721}.
10003 *
10004 * @returns VBox status code.
10005 * @param pVCpu The cross context virtual CPU structure.
10006 */
10007static int hmR0VmxMapHCApicAccessPage(PVMCPU pVCpu)
10008{
10009 PVM pVM = pVCpu->CTX_SUFF(pVM);
10010 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
10011
10012 Assert(PDMHasApic(pVM));
10013 Assert(u64MsrApicBase);
10014
10015 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
10016    Log4Func(("Mapping HC APIC-access page at %#RGp\n", GCPhysApicBase));
10017
10018 /* Unalias any existing mapping. */
10019 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
10020 AssertRCReturn(rc, rc);
10021
10022 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
10023 Assert(pVM->hm.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS);
10024 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
10025 AssertRCReturn(rc, rc);
10026
10027 /* Update the per-VCPU cache of the APIC base MSR. */
10028 pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;
10029 return VINF_SUCCESS;
10030}
10031
10032
10033#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10034/**
10035 * Merges the guest MSR bitmap with the nested-guest MSR bitmap in preparation for
10036 * executing the nested-guest using hardware-assisted VMX.
10037 *
10038 * @param pVCpu The cross context virtual CPU structure.
10039 * @param pVmcsInfoNstGst The nested-guest VMCS info. object.
10040 * @param pVmcsInfoGst The guest VMCS info. object.
10041 */
10042static void hmR0VmxMergeMsrBitmapNested(PCVMCPU pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)
10043{
10044 uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap);
10045 uint64_t const *pu64MsrBitmapGst = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap;
10046 uint64_t *pu64MsrBitmap = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap;
10047 Assert(pu64MsrBitmapNstGst);
10048 Assert(pu64MsrBitmapGst);
10049 Assert(pu64MsrBitmap);
10050
10051 /*
10052 * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any
10053 * MSR that is intercepted by the guest is also intercepted while executing the
10054 * nested-guest using hardware-assisted VMX.
10055 */
10056 uint32_t const cbFrag = sizeof(uint64_t);
10057 uint32_t const cFrags = X86_PAGE_4K_SIZE / cbFrag;
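    /* The MSR bitmap is a single 4K page, i.e. 512 64-bit fragments.  A bit that is set in either
       bitmap means the corresponding MSR access must be intercepted, hence the bitwise OR below. */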
10058    for (uint32_t i = 0; i < cFrags; i++)
10059 pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i];
10060}
10061
10062
10063/**
10064 * Merges the guest VMCS into the nested-guest VMCS controls in preparation for
10065 * hardware-assisted VMX execution of the nested-guest.
10066 *
10067 * For a guest, we don't modify these controls once we set up the VMCS and hence
10068 * this function is never called.
10069 *
10070 * For nested-guests, since the guest hypervisor provides these controls on every
10071 * nested-guest VM-entry and could potentially change them each time, we need to
10072 * merge them before every nested-guest VM-entry.
10073 *
10074 * @returns VBox status code.
10075 * @param pVCpu The cross context virtual CPU structure.
10076 */
10077static int hmR0VmxMergeVmcsNested(PVMCPU pVCpu)
10078{
10079 PVM pVM = pVCpu->CTX_SUFF(pVM);
10080 PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hm.s.vmx.VmcsInfo;
10081 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
10082 Assert(pVmcsNstGst);
10083
10084 /*
10085 * Merge the controls with the requirements of the guest VMCS.
10086 *
10087 * We do not need to validate the nested-guest VMX features specified in the
10088 * nested-guest VMCS with the features supported by the physical CPU as it's
10089 * already done by the VMLAUNCH/VMRESUME instruction emulation.
10090 *
10091 * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the
10092 * guest are derived from the VMX features supported by the physical CPU.
10093 */
10094
10095 /* Pin-based VM-execution controls. */
10096 uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls;
10097
10098 /* Processor-based VM-execution controls. */
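    /* Interrupt/NMI-window exiting, TPR shadowing and MTF from the guest VMCS are not carried
       over here (they are handled separately for the nested-guest), and the nested-guest's use
       of I/O bitmaps is dropped since we always intercept all I/O port accesses. */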
10099 uint32_t u32ProcCtls = (pVmcsNstGst->u32ProcCtls & ~VMX_PROC_CTLS_USE_IO_BITMAPS)
10100 | (pVmcsInfoGst->u32ProcCtls & ~( VMX_PROC_CTLS_INT_WINDOW_EXIT
10101 | VMX_PROC_CTLS_NMI_WINDOW_EXIT
10102 | VMX_PROC_CTLS_USE_TPR_SHADOW
10103 | VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
10104
10105 /* Secondary processor-based VM-execution controls. */
10106 uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2 & ~VMX_PROC_CTLS2_VPID)
10107 | (pVmcsInfoGst->u32ProcCtls2 & ~( VMX_PROC_CTLS2_VIRT_APIC_ACCESS
10108 | VMX_PROC_CTLS2_INVPCID
10109 | VMX_PROC_CTLS2_RDTSCP
10110 | VMX_PROC_CTLS2_XSAVES_XRSTORS
10111 | VMX_PROC_CTLS2_APIC_REG_VIRT
10112 | VMX_PROC_CTLS2_VIRT_INT_DELIVERY
10113 | VMX_PROC_CTLS2_VMFUNC));
10114
10115 /*
10116 * VM-entry controls:
10117     * These controls contain state that depends on the nested-guest state (primarily
10118     * the EFER MSR) and is thus not constant through VMLAUNCH/VMRESUME and the nested-guest
10119     * VM-exit. Although the nested-hypervisor cannot change them, we need to, in order to
10120     * properly continue executing the nested-guest if the EFER MSR changes but does not
10121     * cause a nested-guest VM-exit.
10122 *
10123 * VM-exit controls:
10124 * These controls specify the host state on return. We cannot use the controls from
10125     * the nested-hypervisor state as-is, as it would contain the guest state rather than
10126 * the host state. Since the host state is subject to change (e.g. preemption, trips
10127 * to ring-3, longjmp and rescheduling to a different host CPU) they are not constant
10128 * through VMLAUNCH/VMRESUME and the nested-guest VM-exit.
10129 *
10130 * VM-entry MSR-load:
10131 * The guest MSRs from the VM-entry MSR-load area are already loaded into the
10132 * guest-CPU context by the VMLAUNCH/VMRESUME instruction emulation.
10133 *
10134 * VM-exit MSR-store:
10135 * The VM-exit emulation will take care of populating the MSRs from the guest-CPU
10136 * context back into the VM-exit MSR-store area.
10137 *
10138 * VM-exit MSR-load areas:
10139 * This must contain the real host MSRs with hardware-assisted VMX execution. Hence,
10140 * we can entirely ignore what the nested-hypervisor wants to load here.
10141 */
10142
10143 /*
10144 * Exception bitmap.
10145 *
10146 * We could remove #UD from the guest bitmap and merge it with the nested-guest
10147 * bitmap here (and avoid doing anything while exporting nested-guest state), but to
10148     * keep the code more flexible if intercepting exceptions becomes more dynamic in
10149 * the future we do it as part of exporting the nested-guest state.
10150 */
10151 uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap;
10152
10153 /*
10154 * CR0/CR4 guest/host mask.
10155 *
10156 * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest
10157 * must cause VM-exits, so we need to merge them here.
10158 */
10159 uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask;
10160 uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask;
10161
10162 /*
10163 * Page-fault error-code mask and match.
10164 *
10165 * Although we require unrestricted guest execution (and thereby nested-paging) for
10166 * hardware-assisted VMX execution of nested-guests and thus the outer guest doesn't
10167 * normally intercept #PFs, it might intercept them for debugging purposes.
10168 *
10169 * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF
10170 * filters. If the outer guest is intercepting #PFs we must intercept all #PFs.
10171 */
10172 uint32_t u32XcptPFMask;
10173 uint32_t u32XcptPFMatch;
10174 if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF)))
10175 {
10176 u32XcptPFMask = pVmcsNstGst->u32XcptPFMask;
10177 u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch;
10178 }
10179 else
10180 {
10181 u32XcptPFMask = 0;
10182 u32XcptPFMatch = 0;
10183 }
10184
10185 /*
10186 * Pause-Loop exiting.
10187 */
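    /* Take the smaller of our own and the guest hypervisor's PLE gap and window values. */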
10188 uint32_t const cPleGapTicks = RT_MIN(pVM->hm.s.vmx.cPleGapTicks, pVmcsNstGst->u32PleGap);
10189 uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow);
10190
10191 /*
10192 * I/O Bitmap.
10193 *
10194 * We do not use the I/O bitmap that may be provided by the guest hypervisor as we
10195 * always intercept all I/O port accesses.
10196 */
10197 Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
10198
10199 /*
10200 * APIC-access page.
10201 *
10202 * The APIC-access page address has already been initialized while setting up the
10203 * nested-guest VMCS. In theory, even if the guest-physical address is invalid, it
10204 * should not be on any consequence to the host or to the guest for that matter, but
10205     * should not be of any consequence to the host or to the guest for that matter, but
10206 * emulation to keep it simple.
10207 */
10208
10209 /*
10210 * Virtual-APIC page and TPR threshold.
10211 *
10212 * We shall use the host-physical address of the virtual-APIC page in guest memory directly.
10213 * For this reason, we can access the virtual-APIC page of the nested-guest only using
10214 * PGM physical handlers as we must not assume a kernel virtual-address mapping exists and
10215 * requesting PGM for a mapping could be expensive/resource intensive (PGM mapping cache).
10216 */
10217 RTHCPHYS HCPhysVirtApic = NIL_RTHCPHYS;
10218 uint32_t const u32TprThreshold = pVmcsNstGst->u32TprThreshold;
10219 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
10220 {
10221 int rc = PGMPhysGCPhys2HCPhys(pVM, pVmcsNstGst->u64AddrVirtApic.u, &HCPhysVirtApic);
10222
10223 /*
10224 * If the guest hypervisor has loaded crap into the virtual-APIC page field
10225 * we would fail to obtain a valid host-physical address for its guest-physical
10226 * address.
10227 *
10228 * We currently do not support this scenario. Maybe in the future if there is a
10229 * pressing need we can explore making this particular set of conditions work.
10230 * Right now we just cause a VM-entry failure.
10231 *
10232 * This has already been checked by VMLAUNCH/VMRESUME instruction emulation,
10233         * so it should not really fail at the moment.
10234 */
10235 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
10236 }
10237 else
10238 {
10239 /*
10240         * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not
10241         * used by the guest hypervisor. Preventing MMIO accesses to the physical APIC is
10242         * taken care of by EPT/shadow paging.
10243 */
10244 if (pVM->hm.s.fAllow64BitGuests)
10245 {
10246 u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT
10247 | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10248 }
10249 }
10250
10251 /*
10252 * Validate basic assumptions.
10253 */
10254 PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
10255 Assert(pVM->hm.s.vmx.fAllowUnrestricted);
10256 Assert(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
10257 Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst);
10258
10259 /*
10260 * Commit it to the nested-guest VMCS.
10261 */
10262 int rc = VINF_SUCCESS;
10263 if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls)
10264 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls);
10265 if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls)
10266 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls);
10267 if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2)
10268 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2);
10269 if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap)
10270 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
10271 if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask)
10272 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
10273 if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask)
10274 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
10275 if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask)
10276 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask);
10277 if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch)
10278 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch);
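    /* The CPU only consults the PLE gap/window when "PAUSE exiting" is clear and "PAUSE-loop exiting" is set,
       mirroring the condition below. */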
10279 if ( !(u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
10280 && (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10281 {
10282 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
10283 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, cPleGapTicks);
10284 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
10285 }
10286 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
10287 {
10288 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
10289 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
10290 }
10291 AssertRCReturn(rc, rc);
10292
10293 /*
10294 * Update the nested-guest VMCS cache.
10295 */
10296 pVmcsInfoNstGst->u32PinCtls = u32PinCtls;
10297 pVmcsInfoNstGst->u32ProcCtls = u32ProcCtls;
10298 pVmcsInfoNstGst->u32ProcCtls2 = u32ProcCtls2;
10299 pVmcsInfoNstGst->u32XcptBitmap = u32XcptBitmap;
10300 pVmcsInfoNstGst->u64Cr0Mask = u64Cr0Mask;
10301 pVmcsInfoNstGst->u64Cr4Mask = u64Cr4Mask;
10302 pVmcsInfoNstGst->u32XcptPFMask = u32XcptPFMask;
10303 pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch;
10304 pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic;
10305
10306 /*
10307 * MSR bitmap.
10308 *
10309 * The MSR bitmap address has already been initialized while setting up the
10310      * nested-guest VMCS; here we need to merge the MSR bitmaps.
10311 */
10312 if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
10313 hmR0VmxMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst);
10314
10315 return VINF_SUCCESS;
10316}
10317#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10318
10319
10320/**
10321 * Does the preparations before executing guest code in VT-x.
10322 *
10323 * This may cause longjmps to ring-3 and may even result in rescheduling to the
10324  * recompiler/IEM. We must be cautious about committing guest-state information
10325  * into the VMCS here on the assumption that we will assuredly execute the
10326  * guest in VT-x mode.
10327 *
10328 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
10329 * the common-state (TRPM/forceflags), we must undo those changes so that the
10330 * recompiler/IEM can (and should) use them when it resumes guest execution.
10331 * Otherwise such operations must be done when we can no longer exit to ring-3.
10332 *
10333 * @returns Strict VBox status code (i.e. informational status codes too).
10334 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
10335 * have been disabled.
10336 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
10337 * double-fault into the guest.
10338 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
10339 * dispatched directly.
10340 * @retval VINF_* scheduling changes, we have to go back to ring-3.
10341 *
10342 * @param pVCpu The cross context virtual CPU structure.
10343 * @param pVmxTransient The VMX-transient structure.
10344 * @param fStepping Whether we are single-stepping the guest in the
10345 * hypervisor debugger. Makes us ignore some of the reasons
10346 * for returning to ring-3, and return VINF_EM_DBG_STEPPED
10347 * if event dispatching took place.
10348 */
10349static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)
10350{
10351 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10352
10353#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
10354 if (pVmxTransient->fIsNestedGuest)
10355 {
10356 RT_NOREF2(pVCpu, fStepping);
10357 Log2Func(("Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
10358 return VINF_EM_RESCHEDULE_REM;
10359 }
10360#endif
10361
10362#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
10363 PGMRZDynMapFlushAutoSet(pVCpu);
10364#endif
10365
10366 /*
10367 * Check and process force flag actions, some of which might require us to go back to ring-3.
10368 */
10369 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, fStepping);
10370 if (rcStrict == VINF_SUCCESS)
10371 { /* FFs don't get set all the time. */ }
10372 else
10373 return rcStrict;
10374
10375#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10376 /*
10377 * Switch to the nested-guest VMCS as we may have transitioned into executing
10378 * the nested-guest without leaving ring-0. Otherwise, if we came from ring-3
10379 * we would load the nested-guest VMCS while entering the VMX ring-0 session.
10380 *
10381 * We do this as late as possible to minimize (though not completely remove)
10382      * clearing/loading the VMCS again due to premature trips to ring-3 above.
10383 */
10384 if (pVmxTransient->fIsNestedGuest)
10385 {
10386 if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
10387 {
10388 /*
10389 * Ensure we have synced everything from the guest VMCS and also flag that
10390              * we need to export the full (nested) guest-CPU context to the
10391 * nested-guest VMCS.
10392 */
10393 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
10394 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);
10395
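            /* Disable interrupts so we cannot be preempted (and possibly migrated to another host CPU)
               while the current VMCS is being switched. */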
10396 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
10397 int rc = hmR0VmxSwitchVmcs(&pVCpu->hm.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
10398 if (RT_LIKELY(rc == VINF_SUCCESS))
10399 {
10400 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = true;
10401 ASMSetFlags(fEFlags);
10402 pVmxTransient->pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
10403
10404 /*
10405 * We use a different VM-exit MSR-store area for the nested-guest. Hence,
10406 * flag that we need to update the host MSR values there. Even if we decide
10407 * in the future to share the VM-exit MSR-store area page with the guest,
10408 * if its content differs, we would have to update the host MSRs anyway.
10409 */
10410 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
10411 Assert(!pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer); /** @todo NSTVMX: Paranoia remove later. */
10412 }
10413 else
10414 {
10415 ASMSetFlags(fEFlags);
10416 return rc;
10417 }
10418 }
10419
10420 /*
10421 * Merge guest VMCS controls with the nested-guest VMCS controls.
10422 *
10423 * Even if we have not executed the guest prior to this (e.g. when resuming
10424 * from a saved state), we should be okay with merging controls as we
10425      * initialize the guest VMCS controls as part of the VM setup phase.
10426 */
10427 if (!pVCpu->hm.s.vmx.fMergedNstGstCtls)
10428 {
10429 int rc = hmR0VmxMergeVmcsNested(pVCpu);
10430 AssertRCReturn(rc, rc);
10431 pVCpu->hm.s.vmx.fMergedNstGstCtls = true;
10432 }
10433 }
10434#endif
10435
10436 /*
10437 * Virtualize memory-mapped accesses to the physical APIC (may take locks).
10438 * We look at the guest VMCS control here as we always set it when supported by
10439 * the physical CPU. Looking at the nested-guest control here would not be
10440 * possible because they are not merged yet.
10441 */
10442 PVM pVM = pVCpu->CTX_SUFF(pVM);
10443 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10444 Assert(pVmcsInfo);
10445 if ( !pVCpu->hm.s.vmx.u64GstMsrApicBase
10446 && (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
10447 && PDMHasApic(pVM))
10448 {
10449 int rc = hmR0VmxMapHCApicAccessPage(pVCpu);
10450 AssertRCReturn(rc, rc);
10451 }
10452
10453 /*
10454 * Evaluate events to be injected into the guest.
10455 *
10456 * Events in TRPM can be injected without inspecting the guest state.
10457 * If any new events (interrupts/NMI) are pending currently, we try to set up the
10458      * guest to cause a VM-exit the next time it is ready to receive the event.
10459 */
10460 if (TRPMHasTrap(pVCpu))
10461 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
10462
10463 uint32_t fIntrState;
10464 rcStrict = hmR0VmxEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState);
10465
10466#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10467 /*
10468      * If something failed while evaluating pending events (unlikely), or if we were
10469 * preparing to run a nested-guest but performed a nested-guest VM-exit, we should bail.
10470 */
10471 if ( rcStrict != VINF_SUCCESS
10472 || ( pVmxTransient->fIsNestedGuest
10473 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)))
10474 return rcStrict;
10475#endif
10476
10477 /*
10478 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
10479 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
10480 * also result in triple-faulting the VM.
10481 *
10482 * The above does not apply when executing a nested-guest (since unrestricted guest execution
10483      * is a requirement); regardless, doing it this way avoids duplicating code elsewhere.
10484 */
10485 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);
10486 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10487 { /* likely */ }
10488 else
10489 {
10490 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
10491 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10492 return rcStrict;
10493 }
10494
10495 /*
10496 * A longjump might result in importing CR3 even for VM-exits that don't necessarily
10497      * import CR3 themselves. We will need to update them here, as even a call as late as
10498      * hmR0VmxInjectPendingEvent() above may lazily import guest-CPU state on demand, causing
10499      * the force flags below to be set.
10500 */
10501 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
10502 {
10503 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
10504 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
10505 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
10506 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
10507 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
10508 }
10509 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
10510 {
10511 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
10512 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
10513 }
10514
10515#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10516 /* Paranoia. */
10517 Assert(!pVmxTransient->fIsNestedGuest || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
10518#endif
10519
10520 /*
10521 * No longjmps to ring-3 from this point on!!!
10522 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
10523 * This also disables flushing of the R0-logger instance (if any).
10524 */
10525 VMMRZCallRing3Disable(pVCpu);
10526
10527 /*
10528 * Export the guest state bits.
10529 *
10530 * We cannot perform longjmps while loading the guest state because we do not preserve the
10531 * host/guest state (although the VMCS will be preserved) across longjmps which can cause
10532 * CPU migration.
10533 *
10534 * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment
10535 * registers. Hence, loading of the guest state needs to be done -after- injection of events.
10536 */
10537 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pVmxTransient);
10538 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10539 { /* likely */ }
10540 else
10541 {
10542 VMMRZCallRing3Enable(pVCpu);
10543 return rcStrict;
10544 }
10545
10546 /*
10547 * We disable interrupts so that we don't miss any interrupts that would flag preemption
10548 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
10549 * preemption disabled for a while. Since this is purely to aid the
10550 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
10551      * disable interrupts on NT.
10552 *
10553      * We need to check for force-flags that could've possibly been altered since we last
10554 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
10555 * see @bugref{6398}).
10556 *
10557 * We also check a couple of other force-flags as a last opportunity to get the EMT back
10558 * to ring-3 before executing guest code.
10559 */
10560 pVmxTransient->fEFlags = ASMIntDisableFlags();
10561
10562 if ( ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
10563 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
10564 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
10565 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
10566 {
10567 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
10568 {
10569 pVCpu->hm.s.Event.fPending = false;
10570
10571 /*
10572 * We've injected any pending events. This is really the point of no return (to ring-3).
10573 *
10574 * Note! The caller expects to continue with interrupts & longjmps disabled on successful
10575 * returns from this function, so don't enable them here.
10576 */
10577 return VINF_SUCCESS;
10578 }
10579
10580 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
10581 rcStrict = VINF_EM_RAW_INTERRUPT;
10582 }
10583 else
10584 {
10585 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
10586 rcStrict = VINF_EM_RAW_TO_R3;
10587 }
10588
10589 ASMSetFlags(pVmxTransient->fEFlags);
10590 VMMRZCallRing3Enable(pVCpu);
10591
10592 return rcStrict;
10593}
10594
10595
10596/**
10597 * Final preparations before executing guest code using hardware-assisted VMX.
10598 *
10599 * We can no longer get preempted to a different host CPU and there are no returns
10600 * to ring-3. We ignore any errors that may happen from this point (e.g. VMWRITE
10601  * failures); this function is not intended to fail sans unrecoverable hardware
10602 * errors.
10603 *
10604 * @param pVCpu The cross context virtual CPU structure.
10605 * @param pVmxTransient The VMX-transient structure.
10606 *
10607 * @remarks Called with preemption disabled.
10608 * @remarks No-long-jump zone!!!
10609 */
10610static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
10611{
10612 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
10613 Assert(VMMR0IsLogFlushDisabled(pVCpu));
10614 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
10615 Assert(!pVCpu->hm.s.Event.fPending);
10616
10617 /*
10618 * Indicate start of guest execution and where poking EMT out of guest-context is recognized.
10619 */
10620 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
10621 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
10622
10623 PVM pVM = pVCpu->CTX_SUFF(pVM);
10624 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10625
10626 if (!CPUMIsGuestFPUStateActive(pVCpu))
10627 {
10628 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
10629 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
10630 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
10631 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
10632 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
10633 }
10634
10635 /*
10636 * Re-save the host state bits as we may've been preempted (only happens when
10637 * thread-context hooks are used or when the VM start function changes).
10638 * The 64-on-32 switcher saves the (64-bit) host state into the VMCS and if we
10639 * changed the switcher back to 32-bit, we *must* save the 32-bit host state here,
10640 * see @bugref{8432}.
10641 *
10642 * This may also happen when switching to/from a nested-guest VMCS without leaving
10643 * ring-0.
10644 */
10645 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
10646 {
10647 int rc = hmR0VmxExportHostState(pVCpu);
10648 AssertRC(rc);
10649 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState);
10650 }
10651 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
10652
10653 /*
10654 * Export the state shared between host and guest (FPU, debug, lazy MSRs).
10655 */
10656 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
10657 hmR0VmxExportSharedState(pVCpu, pVmxTransient);
10658 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
10659
10660 /*
10661 * Store status of the shared guest/host debug state at the time of VM-entry.
10662 */
10663#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
10664 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10665 {
10666 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
10667 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
10668 }
10669 else
10670#endif
10671 {
10672 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
10673 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
10674 }
10675
10676 /*
10677 * Always cache the TPR-shadow if the virtual-APIC page exists, thereby skipping
10678 * more than one conditional check. The post-run side of our code shall determine
10679      * whether it needs to sync the virtual APIC TPR with the TPR-shadow.
10680 */
10681 if (pVmcsInfo->pbVirtApic)
10682 pVmxTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR];
10683
10684 /*
10685 * Update the host MSRs values in the VM-exit MSR-load area.
10686 */
10687 if (!pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs)
10688 {
10689 if (pVmcsInfo->cExitMsrLoad > 0)
10690 hmR0VmxUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo);
10691 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = true;
10692 }
10693
10694 /*
10695 * Evaluate if we need to intercept guest RDTSC/P accesses. Set up the
10696 * VMX-preemption timer based on the next virtual sync clock deadline.
10697 */
10698 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
10699 RTCPUID const idCurrentCpu = pHostCpu->idCpu;
10700 if ( !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer
10701 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
10702 {
10703 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient);
10704 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
10705 }
10706
10707 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
10708 hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo); /* Invalidate the appropriate guest entries from the TLB. */
10709 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
10710 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
10711
10712 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
10713
10714 TMNotifyStartOfExecution(pVCpu); /* Notify TM to resume its clocks when TSC is tied to execution,
10715                                                                     as we're about to start executing the guest. */
10716
10717 /*
10718 * Load the guest TSC_AUX MSR when we are not intercepting RDTSCP.
10719 *
10720 * This is done this late as updating the TSC offsetting/preemption timer above
10721 * figures out if we can skip intercepting RDTSCP by calculating the number of
10722 * host CPU ticks till the next virtual sync deadline (for the dynamic case).
10723 */
10724 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
10725 {
10726 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
10727 {
10728 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);
10729 /* NB: Because we call hmR0VmxAddAutoLoadStoreMsr with fUpdateHostMsr=true,
10730 it's safe even after hmR0VmxUpdateAutoLoadHostMsrs has already been done. */
10731 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu),
10732 true /* fSetReadWrite */, true /* fUpdateHostMsr */);
10733 AssertRC(rc);
10734 }
10735 else
10736 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);
10737 }
10738
10739#ifdef VBOX_STRICT
10740 Assert(pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs);
10741 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo);
10742 hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo);
10743 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo));
10744#endif
10745
10746#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
10747 /** @todo r=ramshankar: We can now probably use iemVmxVmentryCheckGuestState here.
10748 * Add a PVMXMSRS parameter to it, so that IEM can look at the host MSRs,
10749 * see @bugref{9180#c54}. */
10750 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
10751 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
10752 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
10753#endif
10754}
10755
10756
10757/**
10758 * First C routine invoked after running guest code using hardware-assisted VMX.
10759 *
10760 * @param pVCpu The cross context virtual CPU structure.
10761 * @param pVmxTransient The VMX-transient structure.
10762 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
10763 *
10764 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
10765 *
10766 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
10767 * unconditionally when it is safe to do so.
10768 */
10769static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
10770{
10771 uint64_t const uHostTsc = ASMReadTSC(); /** @todo We can do a lot better here, see @bugref{9180#c38}. */
10772
10773 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
10774 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
10775 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
10776 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
10777 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
10778 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
10779
10780 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10781 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
10782 {
10783 uint64_t uGstTsc;
10784 if (!pVmxTransient->fIsNestedGuest)
10785 uGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
10786 else
10787 {
10788 uint64_t const uNstGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
10789 uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
10790 }
10791 TMCpuTickSetLastSeen(pVCpu, uGstTsc); /* Update TM with the guest TSC. */
10792 }
10793
10794 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
10795 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
10796 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
10797
10798#if HC_ARCH_BITS == 64
10799 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Some host state messed up by VMX needs restoring. */
10800#endif
10801#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
10802 /* The 64-on-32 switcher maintains VMCS-launch state on its own
10803 and we need to leave it alone here. */
10804 if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64)
10805 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
10806#else
10807 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
10808#endif
10809#ifdef VBOX_STRICT
10810 hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo); /* Verify that the host EFER MSR wasn't modified. */
10811#endif
10812 Assert(!ASMIntAreEnabled());
10813 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
10814 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
10815
10816 /*
10817 * Save the basic VM-exit reason and check if the VM-entry failed.
10818 * See Intel spec. 24.9.1 "Basic VM-exit Information".
10819 */
10820 uint32_t uExitReason;
10821 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
10822 AssertRC(rc);
10823 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
10824 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
10825
10826 /*
10827 * Check if VMLAUNCH/VMRESUME succeeded.
10828 * If this failed, we cause a guru meditation and cease further execution.
10829 *
10830 * However, if we are executing a nested-guest we might fail if we use the
10831      * fast path rather than fully emulating the VMLAUNCH/VMRESUME instruction in IEM.
10832 */
10833 if (RT_LIKELY(rcVMRun == VINF_SUCCESS))
10834 {
10835 /*
10836 * Update the VM-exit history array here even if the VM-entry failed due to:
10837 * - Invalid guest state.
10838 * - MSR loading.
10839 * - Machine-check event.
10840 *
10841      * despite @a fVMEntryFailed being true.
10842 * despite @a fVMEntryFailed being false.
10843 *
10844 * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".
10845 *
10846 * Note! We don't have CS or RIP at this point. Will probably address that later
10847 * by amending the history entry added here.
10848 */
10849 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
10850 UINT64_MAX, uHostTsc);
10851
10852 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
10853 {
10854 VMMRZCallRing3Enable(pVCpu);
10855
10856 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
10857 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
10858
10859#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
10860 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10861 AssertRC(rc);
10862#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
10863 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_RFLAGS);
10864 AssertRC(rc);
10865#else
10866 /*
10867             * Always import the guest-interruptibility state as we need it while evaluating
10868             * and injecting events on re-entry.
10869 *
10870 * We don't import CR0 (when unrestricted guest execution is unavailable) despite
10871 * checking for real-mode while exporting the state because all bits that cause
10872 * mode changes wrt CR0 are intercepted.
10873 */
10874 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
10875 AssertRC(rc);
10876#endif
10877
10878 /*
10879 * Sync the TPR shadow with our APIC state.
10880 */
10881 if ( !pVmxTransient->fIsNestedGuest
10882 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
10883 {
10884 Assert(pVmcsInfo->pbVirtApic);
10885 if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])
10886 {
10887 rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);
10888 AssertRC(rc);
10889 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
10890 }
10891 }
10892
10893 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10894 return;
10895 }
10896 }
10897#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10898 else if (pVmxTransient->fIsNestedGuest)
10899 {
10900# if 0
10901 /*
10902 * Copy the VM-instruction error field to the guest VMCS.
10903 */
10904 /** @todo NSTVMX: Verify we're using the fast path. */
10905 uint32_t u32RoVmInstrError;
10906 rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &u32RoVmInstrError);
10907 AssertRCReturn(rc, rc);
10908 PVMXVVMCS pGstVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
10909 pGstVmcs->u32RoVmInstrError = u32RoVmInstrError;
10910 /** @todo NSTVMX: Advance guest RIP and other fast path related restoration. */
10911# else
10912 AssertMsgFailed(("VMLAUNCH/VMRESUME failed but shouldn't happen when VMLAUNCH/VMRESUME was emulated in IEM!\n"));
10913# endif
10914 }
10915#endif
10916 else
10917 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
10918
10919 VMMRZCallRing3Enable(pVCpu);
10920}
10921
10922
10923/**
10924 * Runs the guest code using hardware-assisted VMX the normal way.
10925 *
10926 * @returns VBox status code.
10927 * @param pVCpu The cross context virtual CPU structure.
10928 * @param pcLoops Pointer to the number of executed loops.
10929 */
10930static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, uint32_t *pcLoops)
10931{
10932 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
10933 Assert(pcLoops);
10934 Assert(*pcLoops <= cMaxResumeLoops);
10935
10936 VMXTRANSIENT VmxTransient;
10937 RT_ZERO(VmxTransient);
10938 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
10939
10940 /* Paranoia. */
10941 Assert(VmxTransient.pVmcsInfo == &pVCpu->hm.s.vmx.VmcsInfo);
10942 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
10943
10944 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
10945 for (;;)
10946 {
10947 Assert(!HMR0SuspendPending());
10948 HMVMX_ASSERT_CPU_SAFE(pVCpu);
10949 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
10950
10951          * Preparatory work for running guest code; this may force us to
10952 * Preparatory work for running nested-guest code, this may force us to
10953 * return to ring-3.
10954 *
10955 * Warning! This bugger disables interrupts on VINF_SUCCESS!
10956 */
10957 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
10958 if (rcStrict != VINF_SUCCESS)
10959 break;
10960
10961 /* Interrupts are disabled at this point! */
10962 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
10963 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
10964 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
10965 /* Interrupts are re-enabled at this point! */
10966
10967 /*
10968 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
10969 */
10970 if (RT_SUCCESS(rcRun))
10971 { /* very likely */ }
10972 else
10973 {
10974 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
10975 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
10976 return rcRun;
10977 }
10978
10979 /*
10980 * Profile the VM-exit.
10981 */
10982 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
10983 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
10984 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
10985 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
10986 HMVMX_START_EXIT_DISPATCH_PROF();
10987
10988 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
10989
10990 /*
10991 * Handle the VM-exit.
10992 */
10993#ifdef HMVMX_USE_FUNCTION_TABLE
10994 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, &VmxTransient);
10995#else
10996 rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient);
10997#endif
10998 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
10999 if (rcStrict == VINF_SUCCESS)
11000 {
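            /* Cap the number of back-to-back guest re-entries before going back to ring-3. */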
11001 if (++(*pcLoops) <= cMaxResumeLoops)
11002 continue;
11003 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
11004 rcStrict = VINF_EM_RAW_INTERRUPT;
11005 }
11006 break;
11007 }
11008
11009 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
11010 return rcStrict;
11011}
11012
11013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
11014/**
11015 * Runs the nested-guest code using hardware-assisted VMX.
11016 *
11017 * @returns VBox status code.
11018 * @param pVCpu The cross context virtual CPU structure.
11019 * @param pcLoops Pointer to the number of executed loops.
11020 *
11021 * @sa hmR0VmxRunGuestCodeNormal.
11022 */
11023static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPU pVCpu, uint32_t *pcLoops)
11024{
11025 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
11026 Assert(pcLoops);
11027 Assert(*pcLoops <= cMaxResumeLoops);
11028
11029 VMXTRANSIENT VmxTransient;
11030 RT_ZERO(VmxTransient);
11031 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
11032 VmxTransient.fIsNestedGuest = true;
11033
11034 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
11035 for (;;)
11036 {
11037 Assert(!HMR0SuspendPending());
11038 HMVMX_ASSERT_CPU_SAFE(pVCpu);
11039 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
11040
11041 /*
11042          * Preparatory work for running nested-guest code; this may force us to
11043 * return to ring-3.
11044 *
11045 * Warning! This bugger disables interrupts on VINF_SUCCESS!
11046 */
11047 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
11048 if (rcStrict != VINF_SUCCESS)
11049 break;
11050
11051 /* Interrupts are disabled at this point! */
11052 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
11053 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
11054 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
11055 /* Interrupts are re-enabled at this point! */
11056
11057 /*
11058 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
11059 */
11060 if (RT_SUCCESS(rcRun))
11061 { /* very likely */ }
11062 else
11063 {
11064 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
11065 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
11066 return rcRun;
11067 }
11068
11069 /*
11070 * Profile the VM-exit.
11071 */
11072 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
11073 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
11074 STAM_COUNTER_INC(&pVCpu->hm.s.paStatNestedExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
11075 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
11076 HMVMX_START_EXIT_DISPATCH_PROF();
11077
11078 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
11079
11080 /*
11081 * Handle the VM-exit.
11082 */
11083 rcStrict = hmR0VmxHandleExitNested(pVCpu, &VmxTransient);
11084 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
11085 if ( rcStrict == VINF_SUCCESS
11086 && CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
11087 {
11088 if (++(*pcLoops) <= cMaxResumeLoops)
11089 continue;
11090 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
11091 rcStrict = VINF_EM_RAW_INTERRUPT;
11092 }
11093 break;
11094 }
11095
11096 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
11097 return rcStrict;
11098}
11099#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11100
11101
11102/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11103 * probes.
11104 *
11105  * The following few functions and associated structure contain the bloat
11106 * necessary for providing detailed debug events and dtrace probes as well as
11107 * reliable host side single stepping. This works on the principle of
11108 * "subclassing" the normal execution loop and workers. We replace the loop
11109 * method completely and override selected helpers to add necessary adjustments
11110 * to their core operation.
11111 *
11112 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11113 * any performance for debug and analysis features.
11114 *
11115 * @{
11116 */
11117
11118/**
11119  * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
11120 * the debug run loop.
11121 */
11122typedef struct VMXRUNDBGSTATE
11123{
11124 /** The RIP we started executing at. This is for detecting that we stepped. */
11125 uint64_t uRipStart;
11126 /** The CS we started executing with. */
11127 uint16_t uCsStart;
11128
11129 /** Whether we've actually modified the 1st execution control field. */
11130 bool fModifiedProcCtls : 1;
11131 /** Whether we've actually modified the 2nd execution control field. */
11132 bool fModifiedProcCtls2 : 1;
11133 /** Whether we've actually modified the exception bitmap. */
11134 bool fModifiedXcptBitmap : 1;
11135
11136     /** We desire the CR0 mask to be cleared. */
11137 bool fClearCr0Mask : 1;
11138     /** We desire the CR4 mask to be cleared. */
11139 bool fClearCr4Mask : 1;
11140 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11141 uint32_t fCpe1Extra;
11142 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11143 uint32_t fCpe1Unwanted;
11144 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11145 uint32_t fCpe2Extra;
11146 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11147 uint32_t bmXcptExtra;
11148 /** The sequence number of the Dtrace provider settings the state was
11149 * configured against. */
11150 uint32_t uDtraceSettingsSeqNo;
11151 /** VM-exits to check (one bit per VM-exit). */
11152 uint32_t bmExitsToCheck[3];
11153
11154 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11155 uint32_t fProcCtlsInitial;
11156 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11157 uint32_t fProcCtls2Initial;
11158 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11159 uint32_t bmXcptInitial;
11160} VMXRUNDBGSTATE;
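/* Ensure the bitmap provides one bit for every VM-exit reason (rounded up to whole 32-bit words). */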
11161AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11162typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
11163
11164
11165/**
11166 * Initializes the VMXRUNDBGSTATE structure.
11167 *
11168 * @param pVCpu The cross context virtual CPU structure of the
11169 * calling EMT.
11170 * @param pVmxTransient The VMX-transient structure.
11171 * @param pDbgState The debug state to initialize.
11172 */
11173static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11174{
11175 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11176 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11177
11178 pDbgState->fModifiedProcCtls = false;
11179 pDbgState->fModifiedProcCtls2 = false;
11180 pDbgState->fModifiedXcptBitmap = false;
11181 pDbgState->fClearCr0Mask = false;
11182 pDbgState->fClearCr4Mask = false;
11183 pDbgState->fCpe1Extra = 0;
11184 pDbgState->fCpe1Unwanted = 0;
11185 pDbgState->fCpe2Extra = 0;
11186 pDbgState->bmXcptExtra = 0;
11187 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11188 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11189 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11190}
11191
11192
11193/**
11194  * Updates the VMCS fields with changes requested by @a pDbgState.
11195 *
11196  * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
11197 * immediately before executing guest code, i.e. when interrupts are disabled.
11198 * We don't check status codes here as we cannot easily assert or return in the
11199 * latter case.
11200 *
11201 * @param pVCpu The cross context virtual CPU structure.
11202 * @param pVmxTransient The VMX-transient structure.
11203 * @param pDbgState The debug state.
11204 */
11205static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11206{
11207 /*
11208 * Ensure desired flags in VMCS control fields are set.
11209 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11210 *
11211 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11212 * there should be no stale data in pCtx at this point.
11213 */
11214 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11215 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11216 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11217 {
11218 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11219 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11220 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11221 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11222 pDbgState->fModifiedProcCtls = true;
11223 }
11224
11225 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11226 {
11227 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11228 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11229 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11230 pDbgState->fModifiedProcCtls2 = true;
11231 }
11232
11233 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11234 {
11235 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11236 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11237 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11238 pDbgState->fModifiedXcptBitmap = true;
11239 }
11240
11241 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11242 {
11243 pVmcsInfo->u64Cr0Mask = 0;
11244 VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, 0);
11245 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11246 }
11247
11248 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11249 {
11250 pVmcsInfo->u64Cr4Mask = 0;
11251 VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, 0);
11252 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11253 }
11254
11255 NOREF(pVCpu);
11256}
11257
11258
11259/**
11260 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
11261 * re-entry next time around.
11262 *
11263 * @returns Strict VBox status code (i.e. informational status codes too).
11264 * @param pVCpu The cross context virtual CPU structure.
11265 * @param pVmxTransient The VMX-transient structure.
11266 * @param pDbgState The debug state.
11267 * @param rcStrict The return code from executing the guest using single
11268 * stepping.
11269 */
11270static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11271 VBOXSTRICTRC rcStrict)
11272{
11273 /*
11274      * Restore VM-execution control settings as we may not re-enter this function the
11275 * next time around.
11276 */
11277 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11278
11279     /* We reload the initial value and trigger whatever recalculations we can the
11280 next time around. From the looks of things, that's all that's required atm. */
11281 if (pDbgState->fModifiedProcCtls)
11282 {
11283 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11284 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11285 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11286 AssertRCReturn(rc2, rc2);
11287 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11288 }
11289
11290 /* We're currently the only ones messing with this one, so just restore the
11291 cached value and reload the field. */
11292 if ( pDbgState->fModifiedProcCtls2
11293 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11294 {
11295 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11296 AssertRCReturn(rc2, rc2);
11297 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11298 }
11299
11300 /* If we've modified the exception bitmap, we restore it and trigger
11301 reloading and partial recalculation the next time around. */
11302 if (pDbgState->fModifiedXcptBitmap)
11303 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11304
11305 return rcStrict;
11306}
11307
11308
11309/**
11310 * Configures VM-exit controls for current DBGF and DTrace settings.
11311 *
11312 * This updates @a pDbgState and the VMCS execution control fields to reflect
11313 * the necessary VM-exits demanded by DBGF and DTrace.
11314 *
11315 * @param pVCpu The cross context virtual CPU structure.
11316 * @param pVmxTransient The VMX-transient structure. May update
11317 * fUpdatedTscOffsettingAndPreemptTimer.
11318 * @param pDbgState The debug state.
11319 */
11320static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11321{
11322 /*
11323 * Take down the dtrace serial number so we can spot changes.
11324 */
11325 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11326 ASMCompilerBarrier();
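    /* The compiler barrier keeps the sequence-number read above from being reordered past the
       event-enable checks below. */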
11327
11328 /*
11329 * We'll rebuild most of the middle block of data members (holding the
11330 * current settings) as we go along here, so start by clearing it all.
11331 */
11332 pDbgState->bmXcptExtra = 0;
11333 pDbgState->fCpe1Extra = 0;
11334 pDbgState->fCpe1Unwanted = 0;
11335 pDbgState->fCpe2Extra = 0;
11336 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11337 pDbgState->bmExitsToCheck[i] = 0;
11338
11339 /*
11340 * Software interrupts (INT XXh) - no idea how to trigger these...
11341 */
11342 PVM pVM = pVCpu->CTX_SUFF(pVM);
11343 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11344 || VBOXVMM_INT_SOFTWARE_ENABLED())
11345 {
11346 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11347 }
11348
11349 /*
11350 * INT3 breakpoints - triggered by #BP exceptions.
11351 */
11352 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11353 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11354
11355 /*
11356 * Exception bitmap and XCPT events+probes.
11357 */
11358 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11359 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11360 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11361
11362 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11363 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11364 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11365 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11366 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11367 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11368 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11369 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11370 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11371 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11372 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11373 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11374 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11375 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11376 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11377 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11378 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11379 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11380
11381 if (pDbgState->bmXcptExtra)
11382 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11383
11384 /*
11385 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11386 *
11387 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11388 * So, when adding/changing/removing please don't forget to update it.
11389 *
11390  * Some of the macros are picking up local variables to save horizontal space
11391 * (being able to see it in a table is the lesser evil here).
11392 */
11393#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11394 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11395 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11396#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11397 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11398 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11399 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11400 } else do { } while (0)
11401#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11402 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11403 { \
11404 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11405 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11406 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11407 } else do { } while (0)
11408#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11409 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11410 { \
11411 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11412 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11413 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11414 } else do { } while (0)
11415#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11416 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11417 { \
11418 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11419 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11420 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11421 } else do { } while (0)
11422
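    /* For the instruction/exit pairs below: the INSTR_ event arranges for the necessary intercept
       (where one is needed), while the corresponding EXIT_ event only marks the exit for checking. */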
11423 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11424 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11425 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11426 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11427 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11428
11429 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11430 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11431 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11432 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11433 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11434 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11435 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11436 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11437 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11438 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11439 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11440 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11441 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11442 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11443 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11444 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11445 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11446 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11447 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11448 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11449 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11450 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11451 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11452 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11453 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11454 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11455 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11456 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11457 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11458 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11459 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11460 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11461 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11462 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11463 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11464 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11465
11466 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11467 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11468 {
11469 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
11470 | CPUMCTX_EXTRN_APIC_TPR);
11471 AssertRC(rc);
11472
11473#if 0 /** @todo fix me */
11474 pDbgState->fClearCr0Mask = true;
11475 pDbgState->fClearCr4Mask = true;
11476#endif
11477 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11478 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11479 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11480 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11481 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11482 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11483 require clearing here and in the loop if we start using it. */
11484 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11485 }
11486 else
11487 {
11488 if (pDbgState->fClearCr0Mask)
11489 {
11490 pDbgState->fClearCr0Mask = false;
11491 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
11492 }
11493 if (pDbgState->fClearCr4Mask)
11494 {
11495 pDbgState->fClearCr4Mask = false;
11496 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
11497 }
11498 }
11499 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11500 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11501
11502 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11503 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11504 {
11505 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11506 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11507 }
11508 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11509 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11510
11511 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11512 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11513 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11514 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11515 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11516 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11517 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11518 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11519#if 0 /** @todo too slow, fix handler. */
11520 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11521#endif
11522 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11523
11524 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11525 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11526 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11527 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11528 {
11529 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11530 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11531 }
11532 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11533 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11534 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11535 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11536
11537 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11538 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11539 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11540 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11541 {
11542 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11543 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11544 }
11545 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11546 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11547 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11548 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11549
11550 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11551 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11552 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11553 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11554 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11555 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11556 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11557 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11558 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11559 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11560 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11561 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11562 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11563 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11564 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11565 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11566 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11567 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11568 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11569 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
11570 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11571 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11572
11573#undef IS_EITHER_ENABLED
11574#undef SET_ONLY_XBM_IF_EITHER_EN
11575#undef SET_CPE1_XBM_IF_EITHER_EN
11576#undef SET_CPEU_XBM_IF_EITHER_EN
11577#undef SET_CPE2_XBM_IF_EITHER_EN
11578
11579 /*
11580 * Sanitize the control stuff.
11581 */
11582 pDbgState->fCpe2Extra &= pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1;
11583 if (pDbgState->fCpe2Extra)
11584 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11585 pDbgState->fCpe1Extra &= pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;
11586 pDbgState->fCpe1Unwanted &= ~pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0;
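/* If the desire for RDTSC exiting has changed, force re-evaluation of the TSC offsetting and preemption timer setup before the next VM-entry. */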
11587 if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11588 {
11589 pVCpu->hm.s.fDebugWantRdTscExit ^= true;
11590 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11591 }
11592
11593 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11594 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11595 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11596 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11597}
11598
11599
11600/**
11601 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11602 * appropriate.
11603 *
11604 * The caller has checked the VM-exit against the
11605 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11606 * already, so we don't have to do that either.
11607 *
11608 * @returns Strict VBox status code (i.e. informational status codes too).
11609 * @param pVCpu The cross context virtual CPU structure.
11610 * @param pVmxTransient The VMX-transient structure.
11611 * @param uExitReason The VM-exit reason.
11612 *
11613 * @remarks The name of this function is displayed by dtrace, so keep it short
11614 * and to the point. No longer than 33 chars long, please.
11615 */
11616static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11617{
11618 /*
11619 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11620 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11621 *
11622 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11623 * does. Additions, changes and removals must be made in both places, in the same order.
11624 *
11625 * Added/removed events must also be reflected in the next section
11626 * where we dispatch dtrace events.
11627 */
11628 bool fDtrace1 = false;
11629 bool fDtrace2 = false;
11630 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11631 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11632 uint32_t uEventArg = 0;
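/* SET_EXIT() maps the VM-exit to its DBGF exit event and checks whether the corresponding dtrace exit probe is enabled; SET_BOTH() additionally does the same for the instruction-level event and probe. */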
11633#define SET_EXIT(a_EventSubName) \
11634 do { \
11635 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11636 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11637 } while (0)
11638#define SET_BOTH(a_EventSubName) \
11639 do { \
11640 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11641 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11642 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11643 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11644 } while (0)
11645 switch (uExitReason)
11646 {
11647 case VMX_EXIT_MTF:
11648 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
11649
11650 case VMX_EXIT_XCPT_OR_NMI:
11651 {
11652 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11653 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11654 {
11655 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11656 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11657 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11658 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11659 {
11660 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11661 {
11662 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11663 uEventArg = pVmxTransient->uExitIntErrorCode;
11664 }
11665 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11666 switch (enmEvent1)
11667 {
11668 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11669 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11670 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11671 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11672 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11673 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11674 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11675 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11676 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11677 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11678 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11679 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11680 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11681 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11682 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11683 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11684 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11685 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11686 default: break;
11687 }
11688 }
11689 else
11690 AssertFailed();
11691 break;
11692
11693 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11694 uEventArg = idxVector;
11695 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11696 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11697 break;
11698 }
11699 break;
11700 }
11701
11702 case VMX_EXIT_TRIPLE_FAULT:
11703 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11704 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11705 break;
11706 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11707 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11708 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11709 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11710 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11711
11712 /* Instruction specific VM-exits: */
11713 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11714 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11715 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11716 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11717 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11718 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11719 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11720 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11721 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11722 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11723 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11724 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11725 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11726 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11727 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11728 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11729 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11730 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11731 case VMX_EXIT_MOV_CRX:
11732 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11733 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11734 SET_BOTH(CRX_READ);
11735 else
11736 SET_BOTH(CRX_WRITE);
11737 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11738 break;
11739 case VMX_EXIT_MOV_DRX:
11740 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11741 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11742 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11743 SET_BOTH(DRX_READ);
11744 else
11745 SET_BOTH(DRX_WRITE);
11746 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11747 break;
11748 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11749 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11750 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11751 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11752 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11753 case VMX_EXIT_GDTR_IDTR_ACCESS:
11754 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
11755 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11756 {
11757 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11758 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11759 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11760 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11761 }
11762 break;
11763
11764 case VMX_EXIT_LDTR_TR_ACCESS:
11765 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
11766 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11767 {
11768 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11769 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11770 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11771 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11772 }
11773 break;
11774
11775 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11776 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11777 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11778 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11779 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11780 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11781 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11782 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11783 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11784 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11785 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11786
11787 /* Events that aren't relevant at this point. */
11788 case VMX_EXIT_EXT_INT:
11789 case VMX_EXIT_INT_WINDOW:
11790 case VMX_EXIT_NMI_WINDOW:
11791 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11792 case VMX_EXIT_PREEMPT_TIMER:
11793 case VMX_EXIT_IO_INSTR:
11794 break;
11795
11796 /* Errors and unexpected events. */
11797 case VMX_EXIT_INIT_SIGNAL:
11798 case VMX_EXIT_SIPI:
11799 case VMX_EXIT_IO_SMI:
11800 case VMX_EXIT_SMI:
11801 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11802 case VMX_EXIT_ERR_MSR_LOAD:
11803 case VMX_EXIT_ERR_MACHINE_CHECK:
11804 case VMX_EXIT_PML_FULL:
11805 case VMX_EXIT_VIRTUALIZED_EOI:
11806 break;
11807
11808 default:
11809 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11810 break;
11811 }
11812#undef SET_BOTH
11813#undef SET_EXIT
11814
11815 /*
11816 * Dtrace tracepoints go first. We do them here at once so we don't
11817 * have to copy the guest state saving and stuff a few dozen times.
11818 * Down side is that we've got to repeat the switch, though this time
11819 * we use enmEvent since the probes are a subset of what DBGF does.
11820 */
11821 if (fDtrace1 || fDtrace2)
11822 {
11823 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11824 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11825 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11826 switch (enmEvent1)
11827 {
11828 /** @todo consider which extra parameters would be helpful for each probe. */
11829 case DBGFEVENT_END: break;
11830 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11831 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11832 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11833 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11834 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11835 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11836 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11837 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11838 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11839 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11840 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11841 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11842 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11843 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11844 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11845 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11846 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11847 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11848 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11849 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11850 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11851 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11852 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11853 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11854 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11855 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11856 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11857 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11858 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11859 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11860 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11861 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11862 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11863 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11864 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11865 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11866 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11867 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11868 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11869 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11870 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11871 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11872 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11873 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11874 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11875 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11876 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11877 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11878 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11879 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11880 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11881 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11882 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11883 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11884 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11885 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11886 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11887 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11888 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11889 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11890 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11891 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11892 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11893 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11894 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11895 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11896 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11897 }
11898 switch (enmEvent2)
11899 {
11900 /** @todo consider which extra parameters would be helpful for each probe. */
11901 case DBGFEVENT_END: break;
11902 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11903 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11904 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11905 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11906 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11907 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11908 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11909 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11910 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11911 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11912 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11913 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11914 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11915 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11916 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11917 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11918 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11919 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11920 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11921 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11922 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11923 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11924 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11925 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11926 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11927 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11928 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11929 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11930 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11931 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11932 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11933 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11934 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11935 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11936 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11937 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11938 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11939 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11940 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11941 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11942 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11943 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11944 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11945 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11946 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11947 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11948 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11949 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11950 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11951 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11952 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11953 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11954 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11955 }
11956 }
11957
11958 /*
11959 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11960 * the DBGF call will do a full check).
11961 *
11962 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11963 * Note! If we have two events, we prioritize the first, i.e. the instruction
11964 * one, in order to avoid event nesting.
11965 */
11966 PVM pVM = pVCpu->CTX_SUFF(pVM);
11967 if ( enmEvent1 != DBGFEVENT_END
11968 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11969 {
11970 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11971 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11972 if (rcStrict != VINF_SUCCESS)
11973 return rcStrict;
11974 }
11975 else if ( enmEvent2 != DBGFEVENT_END
11976 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11977 {
11978 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11979 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11980 if (rcStrict != VINF_SUCCESS)
11981 return rcStrict;
11982 }
11983
11984 return VINF_SUCCESS;
11985}
11986
11987
11988/**
11989 * Single-stepping VM-exit filtering.
11990 *
11991 * This is preprocessing the VM-exits and deciding whether we've gotten far
11992 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11993 * handling is performed.
11994 *
11995 * @returns Strict VBox status code (i.e. informational status codes too).
11996 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11997 * @param pVmxTransient The VMX-transient structure.
11998 * @param pDbgState The debug state.
11999 */
12000DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
12001{
12002 /*
12003 * Expensive (saves context) generic dtrace VM-exit probe.
12004 */
12005 uint32_t const uExitReason = pVmxTransient->uExitReason;
12006 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
12007 { /* more likely */ }
12008 else
12009 {
12010 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
12011 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
12012 AssertRC(rc);
12013 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
12014 }
12015
12016 /*
12017 * Check for host NMI, just to get that out of the way.
12018 */
12019 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
12020 { /* normally likely */ }
12021 else
12022 {
12023 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12024 AssertRCReturn(rc2, rc2);
12025 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
12026 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
12027 return hmR0VmxExitHostNmi(pVCpu);
12028 }
12029
12030 /*
12031 * Check for single stepping event if we're stepping.
12032 */
12033 if (pVCpu->hm.s.fSingleInstruction)
12034 {
12035 switch (uExitReason)
12036 {
12037 case VMX_EXIT_MTF:
12038 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
12039
12040 /* Various events: */
12041 case VMX_EXIT_XCPT_OR_NMI:
12042 case VMX_EXIT_EXT_INT:
12043 case VMX_EXIT_TRIPLE_FAULT:
12044 case VMX_EXIT_INT_WINDOW:
12045 case VMX_EXIT_NMI_WINDOW:
12046 case VMX_EXIT_TASK_SWITCH:
12047 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12048 case VMX_EXIT_APIC_ACCESS:
12049 case VMX_EXIT_EPT_VIOLATION:
12050 case VMX_EXIT_EPT_MISCONFIG:
12051 case VMX_EXIT_PREEMPT_TIMER:
12052
12053 /* Instruction specific VM-exits: */
12054 case VMX_EXIT_CPUID:
12055 case VMX_EXIT_GETSEC:
12056 case VMX_EXIT_HLT:
12057 case VMX_EXIT_INVD:
12058 case VMX_EXIT_INVLPG:
12059 case VMX_EXIT_RDPMC:
12060 case VMX_EXIT_RDTSC:
12061 case VMX_EXIT_RSM:
12062 case VMX_EXIT_VMCALL:
12063 case VMX_EXIT_VMCLEAR:
12064 case VMX_EXIT_VMLAUNCH:
12065 case VMX_EXIT_VMPTRLD:
12066 case VMX_EXIT_VMPTRST:
12067 case VMX_EXIT_VMREAD:
12068 case VMX_EXIT_VMRESUME:
12069 case VMX_EXIT_VMWRITE:
12070 case VMX_EXIT_VMXOFF:
12071 case VMX_EXIT_VMXON:
12072 case VMX_EXIT_MOV_CRX:
12073 case VMX_EXIT_MOV_DRX:
12074 case VMX_EXIT_IO_INSTR:
12075 case VMX_EXIT_RDMSR:
12076 case VMX_EXIT_WRMSR:
12077 case VMX_EXIT_MWAIT:
12078 case VMX_EXIT_MONITOR:
12079 case VMX_EXIT_PAUSE:
12080 case VMX_EXIT_GDTR_IDTR_ACCESS:
12081 case VMX_EXIT_LDTR_TR_ACCESS:
12082 case VMX_EXIT_INVEPT:
12083 case VMX_EXIT_RDTSCP:
12084 case VMX_EXIT_INVVPID:
12085 case VMX_EXIT_WBINVD:
12086 case VMX_EXIT_XSETBV:
12087 case VMX_EXIT_RDRAND:
12088 case VMX_EXIT_INVPCID:
12089 case VMX_EXIT_VMFUNC:
12090 case VMX_EXIT_RDSEED:
12091 case VMX_EXIT_XSAVES:
12092 case VMX_EXIT_XRSTORS:
12093 {
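/* For these instruction-style VM-exits the step is considered complete once RIP or CS no longer matches where the stepping started. */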
12094 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
12095 AssertRCReturn(rc, rc);
12096 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12097 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12098 return VINF_EM_DBG_STEPPED;
12099 break;
12100 }
12101
12102 /* Errors and unexpected events: */
12103 case VMX_EXIT_INIT_SIGNAL:
12104 case VMX_EXIT_SIPI:
12105 case VMX_EXIT_IO_SMI:
12106 case VMX_EXIT_SMI:
12107 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12108 case VMX_EXIT_ERR_MSR_LOAD:
12109 case VMX_EXIT_ERR_MACHINE_CHECK:
12110 case VMX_EXIT_PML_FULL:
12111 case VMX_EXIT_VIRTUALIZED_EOI:
12112 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault-like, so I guess we must process it? */
12113 break;
12114
12115 default:
12116 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12117 break;
12118 }
12119 }
12120
12121 /*
12122 * Check for debugger event breakpoints and dtrace probes.
12123 */
12124 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12125 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12126 {
12127 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12128 if (rcStrict != VINF_SUCCESS)
12129 return rcStrict;
12130 }
12131
12132 /*
12133 * Normal processing.
12134 */
12135#ifdef HMVMX_USE_FUNCTION_TABLE
12136 return g_apfnVMExitHandlers[uExitReason](pVCpu, pVmxTransient);
12137#else
12138 return hmR0VmxHandleExit(pVCpu, pVmxTransient, uExitReason);
12139#endif
12140}
12141
12142
12143/**
12144 * Single steps guest code using hardware-assisted VMX.
12145 *
12146 * This is -not- the same as the guest single-stepping itself (say using EFLAGS.TF)
12147 * but rather single-stepping through the hypervisor debugger.
12148 *
12149 * @returns Strict VBox status code (i.e. informational status codes too).
12150 * @param pVCpu The cross context virtual CPU structure.
12151 * @param pcLoops Pointer to the number of executed loops.
12152 *
12153 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
12154 */
12155static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, uint32_t *pcLoops)
12156{
12157 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
12158 Assert(pcLoops);
12159 Assert(*pcLoops <= cMaxResumeLoops);
12160
12161 VMXTRANSIENT VmxTransient;
12162 RT_ZERO(VmxTransient);
12163 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
12164
12165 /* Set HMCPU indicators. */
12166 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
12167 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
12168 pVCpu->hm.s.fDebugWantRdTscExit = false;
12169 pVCpu->hm.s.fUsingDebugLoop = true;
12170
12171 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
12172 VMXRUNDBGSTATE DbgState;
12173 hmR0VmxRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
12174 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
12175
12176 /*
12177 * The loop.
12178 */
12179 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
12180 for (;;)
12181 {
12182 Assert(!HMR0SuspendPending());
12183 HMVMX_ASSERT_CPU_SAFE(pVCpu);
12184 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
12185 bool fStepping = pVCpu->hm.s.fSingleInstruction;
12186
12187 /* Set up VM-execution controls the next two can respond to. */
12188 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
12189
12190 /*
12191 * Preparatory work for running guest code, this may force us to
12192 * return to ring-3.
12193 *
12194 * Warning! This bugger disables interrupts on VINF_SUCCESS!
12195 */
12196 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping);
12197 if (rcStrict != VINF_SUCCESS)
12198 break;
12199
12200 /* Interrupts are disabled at this point! */
12201 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
12202
12203 /* Override any obnoxious code in the above two calls. */
12204 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
12205
12206 /*
12207 * Finally execute the guest.
12208 */
12209 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
12210
12211 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
12212 /* Interrupts are re-enabled at this point! */
12213
12214 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
12215 if (RT_SUCCESS(rcRun))
12216 { /* very likely */ }
12217 else
12218 {
12219 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
12220 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
12221 return rcRun;
12222 }
12223
12224 /* Profile the VM-exit. */
12225 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
12226 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
12227 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
12228 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
12229 HMVMX_START_EXIT_DISPATCH_PROF();
12230
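/* The no-context probe variant is fired here without importing guest state; the context-saving VBOXVMM_R0_HMVMX_VMEXIT probe is dealt with in hmR0VmxRunDebugHandleExit(). */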
12231 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
12232
12233 /*
12234 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxRunDebugHandleExit().
12235 */
12236 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
12237 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
12238 if (rcStrict != VINF_SUCCESS)
12239 break;
12240 if (++(*pcLoops) > cMaxResumeLoops)
12241 {
12242 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
12243 rcStrict = VINF_EM_RAW_INTERRUPT;
12244 break;
12245 }
12246
12247 /*
12248 * Stepping: Did the RIP change, if so, consider it a single step.
12249 * Otherwise, make sure one of the TFs gets set.
12250 */
12251 if (fStepping)
12252 {
12253 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
12254 AssertRC(rc);
12255 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
12256 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
12257 {
12258 rcStrict = VINF_EM_DBG_STEPPED;
12259 break;
12260 }
12261 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
12262 }
12263
12264 /*
12265 * Update when dtrace settings changes (DBGF kicks us, so no need to check).
12266 */
12267 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
12268 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
12269 }
12270
12271 /*
12272 * Clear the X86_EFL_TF if necessary.
12273 */
12274 if (pVCpu->hm.s.fClearTrapFlag)
12275 {
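/* fClearTrapFlag is set when EFLAGS.TF was forced on the guest for single-stepping; undo it now that the debug loop is done. */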
12276 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
12277 AssertRC(rc);
12278 pVCpu->hm.s.fClearTrapFlag = false;
12279 pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
12280 }
12281 /** @todo there seem to be issues with the resume flag when the monitor trap
12282 * flag is pending without being used. Seen early in BIOS init when
12283 * accessing APIC page in protected mode. */
12284
12285 /*
12286 * Restore VM-exit control settings as we may not re-enter this function the
12287 * next time around.
12288 */
12289 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
12290
12291 /* Restore HMCPU indicators. */
12292 pVCpu->hm.s.fUsingDebugLoop = false;
12293 pVCpu->hm.s.fDebugWantRdTscExit = false;
12294 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
12295
12296 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
12297 return rcStrict;
12298}
12299
12300
12301/** @} */
12302
12303
12304/**
12305 * Checks if any expensive dtrace probes are enabled and we should go to the
12306 * debug loop.
12307 *
12308 * @returns true if we should use debug loop, false if not.
12309 */
12310static bool hmR0VmxAnyExpensiveProbesEnabled(void)
12311{
12312 /* It's probably faster to OR the raw 32-bit counter variables together.
12313 Since the variables are in an array and the probes are next to one
12314 another (more or less), we have good locality. So, better read
12315 eight or nine cache lines every time and only have one conditional, than
12316 128+ conditionals, right? */
12317 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
12318 | VBOXVMM_XCPT_DE_ENABLED_RAW()
12319 | VBOXVMM_XCPT_DB_ENABLED_RAW()
12320 | VBOXVMM_XCPT_BP_ENABLED_RAW()
12321 | VBOXVMM_XCPT_OF_ENABLED_RAW()
12322 | VBOXVMM_XCPT_BR_ENABLED_RAW()
12323 | VBOXVMM_XCPT_UD_ENABLED_RAW()
12324 | VBOXVMM_XCPT_NM_ENABLED_RAW()
12325 | VBOXVMM_XCPT_DF_ENABLED_RAW()
12326 | VBOXVMM_XCPT_TS_ENABLED_RAW()
12327 | VBOXVMM_XCPT_NP_ENABLED_RAW()
12328 | VBOXVMM_XCPT_SS_ENABLED_RAW()
12329 | VBOXVMM_XCPT_GP_ENABLED_RAW()
12330 | VBOXVMM_XCPT_PF_ENABLED_RAW()
12331 | VBOXVMM_XCPT_MF_ENABLED_RAW()
12332 | VBOXVMM_XCPT_AC_ENABLED_RAW()
12333 | VBOXVMM_XCPT_XF_ENABLED_RAW()
12334 | VBOXVMM_XCPT_VE_ENABLED_RAW()
12335 | VBOXVMM_XCPT_SX_ENABLED_RAW()
12336 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
12337 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
12338 ) != 0
12339 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
12340 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
12341 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
12342 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
12343 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
12344 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
12345 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
12346 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
12347 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
12348 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
12349 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
12350 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
12351 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
12352 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
12353 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
12354 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
12355 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
12356 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
12357 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
12358 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
12359 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
12360 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
12361 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
12362 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
12363 | VBOXVMM_INSTR_STR_ENABLED_RAW()
12364 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
12365 | VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
12366 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
12367 | VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
12368 | VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
12369 | VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
12370 | VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
12371 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
12372 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED_RAW()
12373 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED_RAW()
12374 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED_RAW()
12375 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED_RAW()
12376 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED_RAW()
12377 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED_RAW()
12378 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED_RAW()
12379 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED_RAW()
12380 | VBOXVMM_INSTR_VMX_VMXON_ENABLED_RAW()
12381 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED_RAW()
12382 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED_RAW()
12383 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED_RAW()
12384 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED_RAW()
12385 ) != 0
12386 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
12387 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
12388 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
12389 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
12390 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
12391 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
12392 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
12393 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
12394 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
12395 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
12396 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
12397 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
12398 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
12399 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
12400 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
12401 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
12402 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
12403 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
12404 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
12405 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
12406 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
12407 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
12408 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
12409 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
12410 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
12411 | VBOXVMM_EXIT_STR_ENABLED_RAW()
12412 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
12413 | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
12414 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
12415 | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
12416 | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
12417 | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
12418 | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
12419 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
12420 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
12421 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
12422 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
12423 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
12424 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
12425 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
12426 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
12427 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
12428 | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
12429 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
12430 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
12431 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
12432 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
12433 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
12434 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
12435 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
12436 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
12437 ) != 0;
12438}
12439
12440
12441/**
12442 * Runs the guest using hardware-assisted VMX.
12443 *
12444 * @returns Strict VBox status code (i.e. informational status codes too).
12445 * @param pVCpu The cross context virtual CPU structure.
12446 */
12447VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu)
12448{
12449 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12450 Assert(VMMRZCallRing3IsEnabled(pVCpu));
12451 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
12452 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
12453
12454 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
12455
12456 VBOXSTRICTRC rcStrict;
12457 uint32_t cLoops = 0;
12458#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12459 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(pCtx);
12460#else
12461 bool const fInNestedGuestMode = false;
12462#endif
12463 if (!fInNestedGuestMode)
12464 {
12465 if ( !pVCpu->hm.s.fUseDebugLoop
12466 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
12467 && !DBGFIsStepping(pVCpu)
12468 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
12469 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, &cLoops);
12470 else
12471 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, &cLoops);
12472 }
12473#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12474 else
12475 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
12476
12477 if (rcStrict == VINF_VMX_VMLAUNCH_VMRESUME)
12478 rcStrict = hmR0VmxRunGuestCodeNested(pVCpu, &cLoops);
12479#endif
12480
12481 if (rcStrict == VERR_EM_INTERPRETER)
12482 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
12483 else if (rcStrict == VINF_EM_RESET)
12484 rcStrict = VINF_EM_TRIPLE_FAULT;
12485
12486 int rc2 = hmR0VmxExitToRing3(pVCpu, rcStrict);
12487 if (RT_FAILURE(rc2))
12488 {
12489 pVCpu->hm.s.u32HMError = (uint32_t)VBOXSTRICTRC_VAL(rcStrict);
12490 rcStrict = rc2;
12491 }
12492 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
12493 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
12494 return rcStrict;
12495}
12496
12497
12498#ifndef HMVMX_USE_FUNCTION_TABLE
12499/**
12500 * Handles a guest VM-exit from hardware-assisted VMX execution.
12501 *
12502 * @returns Strict VBox status code (i.e. informational status codes too).
12503 * @param pVCpu The cross context virtual CPU structure.
12504 * @param pVmxTransient The VMX-transient structure.
12505 */
12506DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12507{
12508#ifdef DEBUG_ramshankar
12509#define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
12510 do { \
12511 if (a_fSave != 0) \
12512 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
12513 VBOXSTRICTRC rcStrict = a_CallExpr; \
12514 if (a_fSave != 0) \
12515 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
12516 return rcStrict; \
12517 } while (0)
12518#else
12519# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
12520#endif
12521 uint32_t const uExitReason = pVmxTransient->uExitReason;
12522 switch (uExitReason)
12523 {
12524 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient));
12525 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pVmxTransient));
12526 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pVmxTransient));
12527 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pVmxTransient));
12528 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pVmxTransient));
12529 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pVmxTransient));
12530 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pVmxTransient));
12531 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient));
12532 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pVmxTransient));
12533 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pVmxTransient));
12534 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pVmxTransient));
12535 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient));
12536 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pVmxTransient));
12537 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pVmxTransient));
12538 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pVmxTransient));
12539 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient));
12540 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pVmxTransient));
12541 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pVmxTransient));
12542 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pVmxTransient));
12543 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pVmxTransient));
12544 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pVmxTransient));
12545 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pVmxTransient));
12546 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pVmxTransient));
12547 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pVmxTransient));
12548 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pVmxTransient));
12549 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pVmxTransient));
12550 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pVmxTransient));
12551 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pVmxTransient));
12552 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pVmxTransient));
12553 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pVmxTransient));
12554#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12555 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, hmR0VmxExitVmclear(pVCpu, pVmxTransient));
12556 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, hmR0VmxExitVmlaunch(pVCpu, pVmxTransient));
12557 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrld(pVCpu, pVmxTransient));
12558 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrst(pVCpu, pVmxTransient));
12559 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, hmR0VmxExitVmread(pVCpu, pVmxTransient));
12560 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, hmR0VmxExitVmresume(pVCpu, pVmxTransient));
12561 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, hmR0VmxExitVmwrite(pVCpu, pVmxTransient));
12562 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, hmR0VmxExitVmxoff(pVCpu, pVmxTransient));
12563 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, hmR0VmxExitVmxon(pVCpu, pVmxTransient));
12564 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, hmR0VmxExitInvvpid(pVCpu, pVmxTransient));
12565 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient));
12566#else
12567 case VMX_EXIT_VMCLEAR:
12568 case VMX_EXIT_VMLAUNCH:
12569 case VMX_EXIT_VMPTRLD:
12570 case VMX_EXIT_VMPTRST:
12571 case VMX_EXIT_VMREAD:
12572 case VMX_EXIT_VMRESUME:
12573 case VMX_EXIT_VMWRITE:
12574 case VMX_EXIT_VMXOFF:
12575 case VMX_EXIT_VMXON:
12576 case VMX_EXIT_INVVPID:
12577 case VMX_EXIT_INVEPT:
12578 return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
12579#endif
12580
12581 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pVmxTransient);
12582 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
12583 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
12584
12585 case VMX_EXIT_INIT_SIGNAL:
12586 case VMX_EXIT_SIPI:
12587 case VMX_EXIT_IO_SMI:
12588 case VMX_EXIT_SMI:
12589 case VMX_EXIT_ERR_MSR_LOAD:
12590 case VMX_EXIT_ERR_MACHINE_CHECK:
12591 case VMX_EXIT_PML_FULL:
12592 case VMX_EXIT_VIRTUALIZED_EOI:
12593 case VMX_EXIT_GDTR_IDTR_ACCESS:
12594 case VMX_EXIT_LDTR_TR_ACCESS:
12595 case VMX_EXIT_APIC_WRITE:
12596 case VMX_EXIT_RDRAND:
12597 case VMX_EXIT_RSM:
12598 case VMX_EXIT_VMFUNC:
12599 case VMX_EXIT_ENCLS:
12600 case VMX_EXIT_RDSEED:
12601 case VMX_EXIT_XSAVES:
12602 case VMX_EXIT_XRSTORS:
12603 case VMX_EXIT_UMWAIT:
12604 case VMX_EXIT_TPAUSE:
12605 default:
12606 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
12607 }
12608#undef VMEXIT_CALL_RET
12609}
12610#endif /* !HMVMX_USE_FUNCTION_TABLE */
12611
12612
12613#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12614/**
12615 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
12616 *
12617 * @returns Strict VBox status code (i.e. informational status codes too).
12618 * @param pVCpu The cross context virtual CPU structure.
12619 * @param pVmxTransient The VMX-transient structure.
12620 */
12621DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12622{
12623 uint32_t const uExitReason = pVmxTransient->uExitReason;
12624 switch (uExitReason)
12625 {
12626 case VMX_EXIT_EPT_MISCONFIG: return hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient);
12627 case VMX_EXIT_EPT_VIOLATION: return hmR0VmxExitEptViolation(pVCpu, pVmxTransient);
12628 case VMX_EXIT_XCPT_OR_NMI: return hmR0VmxExitXcptOrNmiNested(pVCpu, pVmxTransient);
12629 case VMX_EXIT_IO_INSTR: return hmR0VmxExitIoInstrNested(pVCpu, pVmxTransient);
12630 case VMX_EXIT_HLT: return hmR0VmxExitHltNested(pVCpu, pVmxTransient);
12631
12632 /*
12633 * We shouldn't direct host physical interrupts to the nested-guest.
12634 */
12635 case VMX_EXIT_EXT_INT:
12636 return hmR0VmxExitExtInt(pVCpu, pVmxTransient);
12637
12638 /*
12639 * Instructions that cause VM-exits unconditionally or whose exit condition
12640 * is taken solely from the guest hypervisor (meaning if the VM-exit
12641 * happens, it's guaranteed to be a nested-guest VM-exit).
12642 *
12643 * - Provides VM-exit instruction length ONLY.
12644 */
12645 case VMX_EXIT_CPUID: /* Unconditional. */
12646 case VMX_EXIT_VMCALL:
12647 case VMX_EXIT_GETSEC:
12648 case VMX_EXIT_INVD:
12649 case VMX_EXIT_XSETBV:
12650 case VMX_EXIT_VMLAUNCH:
12651 case VMX_EXIT_VMRESUME:
12652 case VMX_EXIT_VMXOFF:
12653 case VMX_EXIT_ENCLS: /* Condition specified solely by guest hypervisor. */
12654 case VMX_EXIT_VMFUNC:
12655 return hmR0VmxExitInstrNested(pVCpu, pVmxTransient);
12656
12657 /*
12658 * Instructions that cause VM-exits unconditionally or whose exit condition
12659 * is taken solely from the guest hypervisor (meaning if the VM-exit
12660 * happens, it's guaranteed to be a nested-guest VM-exit).
12661 *
12662 * - Provides VM-exit instruction length.
12663 * - Provides VM-exit information.
12664 * - Optionally provides VM-exit qualification.
12665 *
12666 * Since VM-exit qualification is 0 for all VM-exits where it is not
12667 * applicable, reading and passing it to the guest should produce
12668 * defined behavior.
12669 *
12670 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
12671 */
12672 case VMX_EXIT_INVEPT: /* Unconditional. */
12673 case VMX_EXIT_INVVPID:
12674 case VMX_EXIT_VMCLEAR:
12675 case VMX_EXIT_VMPTRLD:
12676 case VMX_EXIT_VMPTRST:
12677 case VMX_EXIT_VMXON:
12678 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by guest hypervisor. */
12679 case VMX_EXIT_LDTR_TR_ACCESS:
12680 case VMX_EXIT_RDRAND:
12681 case VMX_EXIT_RDSEED:
12682 case VMX_EXIT_XSAVES:
12683 case VMX_EXIT_XRSTORS:
12684 case VMX_EXIT_UMWAIT:
12685 case VMX_EXIT_TPAUSE:
12686 return hmR0VmxExitInstrWithInfoNested(pVCpu, pVmxTransient);
12687
12688 case VMX_EXIT_RDTSC: return hmR0VmxExitRdtscNested(pVCpu, pVmxTransient);
12689 case VMX_EXIT_RDTSCP: return hmR0VmxExitRdtscpNested(pVCpu, pVmxTransient);
12690 case VMX_EXIT_RDMSR: return hmR0VmxExitRdmsrNested(pVCpu, pVmxTransient);
12691 case VMX_EXIT_WRMSR: return hmR0VmxExitWrmsrNested(pVCpu, pVmxTransient);
12692 case VMX_EXIT_INVLPG: return hmR0VmxExitInvlpgNested(pVCpu, pVmxTransient);
12693 case VMX_EXIT_INVPCID: return hmR0VmxExitInvpcidNested(pVCpu, pVmxTransient);
12694 case VMX_EXIT_TASK_SWITCH: return hmR0VmxExitTaskSwitchNested(pVCpu, pVmxTransient);
12695 case VMX_EXIT_WBINVD: return hmR0VmxExitWbinvdNested(pVCpu, pVmxTransient);
12696 case VMX_EXIT_MTF: return hmR0VmxExitMtfNested(pVCpu, pVmxTransient);
12697 case VMX_EXIT_APIC_ACCESS: return hmR0VmxExitApicAccessNested(pVCpu, pVmxTransient);
12698 case VMX_EXIT_APIC_WRITE: return hmR0VmxExitApicWriteNested(pVCpu, pVmxTransient);
12699 case VMX_EXIT_VIRTUALIZED_EOI: return hmR0VmxExitVirtEoiNested(pVCpu, pVmxTransient);
12700 case VMX_EXIT_MOV_CRX: return hmR0VmxExitMovCRxNested(pVCpu, pVmxTransient);
12701 case VMX_EXIT_INT_WINDOW: return hmR0VmxExitIntWindowNested(pVCpu, pVmxTransient);
12702 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindowNested(pVCpu, pVmxTransient);
12703 case VMX_EXIT_TPR_BELOW_THRESHOLD: return hmR0VmxExitTprBelowThresholdNested(pVCpu, pVmxTransient);
12704 case VMX_EXIT_MWAIT: return hmR0VmxExitMwaitNested(pVCpu, pVmxTransient);
12705 case VMX_EXIT_MONITOR: return hmR0VmxExitMonitorNested(pVCpu, pVmxTransient);
12706 case VMX_EXIT_PAUSE: return hmR0VmxExitPauseNested(pVCpu, pVmxTransient);
12707
12708 case VMX_EXIT_PREEMPT_TIMER:
12709 {
12710 /** @todo NSTVMX: Preempt timer. */
12711 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
12712 }
12713
12714 case VMX_EXIT_MOV_DRX: return hmR0VmxExitMovDRxNested(pVCpu, pVmxTransient);
12715 case VMX_EXIT_RDPMC: return hmR0VmxExitRdpmcNested(pVCpu, pVmxTransient);
12716
12717 case VMX_EXIT_VMREAD:
12718 case VMX_EXIT_VMWRITE: return hmR0VmxExitVmreadVmwriteNested(pVCpu, pVmxTransient);
12719
12720 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFaultNested(pVCpu, pVmxTransient);
12721 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
12722
12723 case VMX_EXIT_INIT_SIGNAL:
12724 case VMX_EXIT_SIPI:
12725 case VMX_EXIT_IO_SMI:
12726 case VMX_EXIT_SMI:
12727 case VMX_EXIT_ERR_MSR_LOAD:
12728 case VMX_EXIT_ERR_MACHINE_CHECK:
12729 case VMX_EXIT_PML_FULL:
12730 case VMX_EXIT_RSM:
12731 default:
12732 {
12733 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
12734 }
12735 }
12736}
12737#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12738
12739
12740#ifdef VBOX_STRICT
12741 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
12742# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
12743 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
12744
12745# define HMVMX_ASSERT_PREEMPT_CPUID() \
12746 do { \
12747 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
12748 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
12749 } while (0)
12750
12751# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12752 do { \
12753 AssertPtr((a_pVCpu)); \
12754 AssertPtr((a_pVmxTransient)); \
12755 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
12756 Assert((a_pVmxTransient)->pVmcsInfo); \
12757 Assert(ASMIntAreEnabled()); \
12758 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
12759 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
12760 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", (a_pVCpu)->idCpu)); \
12761 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
12762 if (VMMR0IsLogFlushDisabled((a_pVCpu))) \
12763 HMVMX_ASSERT_PREEMPT_CPUID(); \
12764 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
12765 } while (0)
12766
12767# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12768 do { \
12769 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
12770 Assert((a_pVmxTransient)->fIsNestedGuest); \
12771 } while (0)
12772
12773# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12774 do { \
12775 Log4Func(("\n")); \
12776 } while (0)
12777#else
12778# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12779 do { \
12780 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
12781 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
12782 } while (0)
12783
12784# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12785 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
12786
12787# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
12788#endif
12789
12790
12791/**
12792 * Advances the guest RIP by the specified number of bytes.
12793 *
12794 * @param pVCpu The cross context virtual CPU structure.
12795 * @param cbInstr Number of bytes to advance the RIP by.
12796 *
12797 * @remarks No-long-jump zone!!!
12798 */
12799DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, uint32_t cbInstr)
12800{
12801 /* Advance the RIP. */
12802 pVCpu->cpum.GstCtx.rip += cbInstr;
12803 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12804
12805 /* Update interrupt inhibition. */
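/* If RIP has moved past the instruction that established the inhibition (STI or MOV SS), it no longer applies and the force-flag can be cleared. */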
12806 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
12807 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
12808 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
12809}
12810
12811
12812/**
12813 * Advances the guest RIP after reading it from the VMCS.
12814 *
12815 * @returns VBox status code, no informational status codes.
12816 * @param pVCpu The cross context virtual CPU structure.
12817 * @param pVmxTransient The VMX-transient structure.
12818 *
12819 * @remarks No-long-jump zone!!!
12820 */
12821static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12822{
12823 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12824 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
12825 AssertRCReturn(rc, rc);
12826
12827 hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbInstr);
12828 return VINF_SUCCESS;
12829}
12830
12831
12832/**
12833 * Handle a condition that occurred while delivering an event through the guest
12834 * IDT.
12835 *
12836 * @returns Strict VBox status code (i.e. informational status codes too).
12837 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
12838 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
12839 * to continue execution of the guest which will deliver the \#DF.
12840 * @retval VINF_EM_RESET if we detected a triple-fault condition.
12841 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
12842 *
12843 * @param pVCpu The cross context virtual CPU structure.
12844 * @param pVmxTransient The VMX-transient structure.
12845 *
12846 * @remarks No-long-jump zone!!!
12847 */
12848static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12849{
12850 uint32_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
12851
12852 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
12853 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12854 AssertRCReturn(rc2, rc2);
12855
12856 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
12857 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12858 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
12859 {
12860 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
12861 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
12862
12863 /*
12864 * If the event was a software interrupt (generated with INT n) or a software exception
12865 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
12866 * can handle the VM-exit and continue guest execution, which will re-execute the
12867 * instruction, rather than re-injecting the exception. Re-injection can cause premature
12868 * trips to ring-3 before injection and involves TRPM, which currently has no way of
12869 * recording that these exceptions were caused by these instructions (ICEBP's #DB poses
12870 * the problem).
12871 */
12872 IEMXCPTRAISE enmRaise;
12873 IEMXCPTRAISEINFO fRaiseInfo;
12874 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
12875 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
12876 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
12877 {
12878 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
12879 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
12880 }
12881 else if (VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
12882 {
12883 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
12884 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
12885 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
12886 /** @todo Change AssertMsgReturn to just AssertMsg later. */
12887 AssertMsgReturn(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT,
12888 ("Unexpected VM-exit interruption vector type %#x!\n", uExitVectorType), VERR_VMX_IPE_5);
12889
12890 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
12891
12892 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
12893 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
12894 {
12895 pVmxTransient->fVectoringPF = true;
12896 enmRaise = IEMXCPTRAISE_PREV_EVENT;
12897 }
12898 }
12899 else
12900 {
12901 /*
12902 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
12903 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
12904 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
12905 */
12906 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
12907 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
12908 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
12909 enmRaise = IEMXCPTRAISE_PREV_EVENT;
12910 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
12911 }
12912
12913 /*
12914 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
12915 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
12916 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
12917 * subsequent VM-entry would fail.
12918 *
12919 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
12920 */
12921 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
12922 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
12923 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT
12924 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
12925 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
12926 {
12927 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
12928 }
12929
12930 switch (enmRaise)
12931 {
12932 case IEMXCPTRAISE_CURRENT_XCPT:
12933 {
12934 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
12935 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
12936 Assert(rcStrict == VINF_SUCCESS);
12937 break;
12938 }
12939
12940 case IEMXCPTRAISE_PREV_EVENT:
12941 {
12942 uint32_t u32ErrCode;
12943 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
12944 {
12945 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
12946 AssertRCReturn(rc2, rc2);
12947 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
12948 }
12949 else
12950 u32ErrCode = 0;
12951
12952 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
12953 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
12954 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
12955 0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2);
12956
12957 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
12958 pVCpu->hm.s.Event.u32ErrCode));
12959 Assert(rcStrict == VINF_SUCCESS);
12960 break;
12961 }
12962
12963 case IEMXCPTRAISE_REEXEC_INSTR:
12964 Assert(rcStrict == VINF_SUCCESS);
12965 break;
12966
12967 case IEMXCPTRAISE_DOUBLE_FAULT:
12968 {
12969 /*
12970 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
12971 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
12972 */
12973 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
12974 {
12975 pVmxTransient->fVectoringDoublePF = true;
12976 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
12977 pVCpu->cpum.GstCtx.cr2));
12978 rcStrict = VINF_SUCCESS;
12979 }
12980 else
12981 {
12982 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
12983 hmR0VmxSetPendingXcptDF(pVCpu);
12984 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
12985 uIdtVector, uExitVector));
12986 rcStrict = VINF_HM_DOUBLE_FAULT;
12987 }
12988 break;
12989 }
12990
12991 case IEMXCPTRAISE_TRIPLE_FAULT:
12992 {
12993 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
12994 rcStrict = VINF_EM_RESET;
12995 break;
12996 }
12997
12998 case IEMXCPTRAISE_CPU_HANG:
12999 {
13000 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
13001 rcStrict = VERR_EM_GUEST_CPU_HANG;
13002 break;
13003 }
13004
13005 default:
13006 {
13007 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
13008 rcStrict = VERR_VMX_IPE_2;
13009 break;
13010 }
13011 }
13012 }
13013 else if ( VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
13014 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
13015 && uExitVector != X86_XCPT_DF
13016 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
13017 {
13018 /*
13019 * Execution of IRET caused this fault when NMI blocking was in effect (i.e. we're in the guest NMI handler).
13020 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
13021 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
13022 */
13023 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
13024 {
13025 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
13026 VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
13027 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
13028 }
13029 }
13030
13031 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
13032 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
13033 return rcStrict;
13034}
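
/*
 * Illustrative sketch, not part of the original source: the calling pattern implied by
 * the status codes of hmR0VmxCheckExitDueToEventDelivery above, condensed from the real
 * conditional VM-exit handlers below (see hmR0VmxExitXcptOrNmi). The handler name is
 * hypothetical.
 */
#if 0 /* example only */
HMVMX_EXIT_DECL hmR0VmxExitSomeConditionSketch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);     /* Callers read this before the check, see hmR0VmxExitXcptOrNmi. */
    AssertRCReturn(rc, rc);

    VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
    if (rcStrict == VINF_SUCCESS)
    { /* Continue handling the VM-exit; any reflected event is now pending for re-injection. */ }
    else if (rcStrict == VINF_HM_DOUBLE_FAULT)
        return VINF_SUCCESS;            /* A #DF is pending; resume the guest so it gets delivered. */
    else
        return rcStrict;                /* VINF_EM_RESET (triple fault) or VERR_EM_GUEST_CPU_HANG. */

    /* ... handle the VM-exit itself ... */
    return rcStrict;
}
#endif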
13035
13036
13037/** @name VM-exit handlers.
13038 * @{
13039 */
13040/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13041/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
13042/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13043
13044/**
13045 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
13046 */
13047HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13048{
13049 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
13051 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
13052 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
13053 return VINF_SUCCESS;
13054 return VINF_EM_RAW_INTERRUPT;
13055}
13056
13057
13058/**
13059 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
13060 * VM-exit.
13061 */
13062HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13063{
13064 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13065 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
13066
13067 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13068 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
13069 AssertRCReturn(rc, rc);
13070
13071 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
13072 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
13073 && uIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
13074 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
13075
13076 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
13077 {
13078 /*
13079 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
13080 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
13081 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
13082 *
13083 * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
13084 * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State".
13085 */
13086 return hmR0VmxExitHostNmi(pVCpu);
13087 }
13088
13089 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
13090 VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
13091 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13092 { /* likely */ }
13093 else
13094 {
13095 if (rcStrict == VINF_HM_DOUBLE_FAULT)
13096 rcStrict = VINF_SUCCESS;
13097 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
13098 return rcStrict;
13099 }
13100
13101 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
13102 uint32_t const uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
13103 switch (uIntType)
13104 {
13105 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
13106 Assert(uVector == X86_XCPT_DB);
13107 RT_FALL_THRU();
13108 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
13109 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
13110 RT_FALL_THRU();
13111 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
13112 {
13113 /*
13114 * If there's any exception caused as a result of event injection, the resulting
13115 * secondary/final exception will be pending, and we shall continue guest execution
13116 * after injecting the event. The page-fault case is complicated and we manually
13117 * handle any currently pending event in hmR0VmxExitXcptPF.
13118 */
13119 if (!pVCpu->hm.s.Event.fPending)
13120 { /* likely */ }
13121 else if (uVector != X86_XCPT_PF)
13122 {
13123 rcStrict = VINF_SUCCESS;
13124 break;
13125 }
13126
13127 switch (uVector)
13128 {
13129 case X86_XCPT_PF: rcStrict = hmR0VmxExitXcptPF(pVCpu, pVmxTransient); break;
13130 case X86_XCPT_GP: rcStrict = hmR0VmxExitXcptGP(pVCpu, pVmxTransient); break;
13131 case X86_XCPT_MF: rcStrict = hmR0VmxExitXcptMF(pVCpu, pVmxTransient); break;
13132 case X86_XCPT_DB: rcStrict = hmR0VmxExitXcptDB(pVCpu, pVmxTransient); break;
13133 case X86_XCPT_BP: rcStrict = hmR0VmxExitXcptBP(pVCpu, pVmxTransient); break;
13134 case X86_XCPT_AC: rcStrict = hmR0VmxExitXcptAC(pVCpu, pVmxTransient); break;
13135
13136 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
13137 rcStrict = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
13138 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
13139 rcStrict = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
13140 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
13141 rcStrict = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
13142 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
13143 rcStrict = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
13144 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
13145 rcStrict = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
13146 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
13147 rcStrict = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
13148 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
13149 rcStrict = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
13150 default:
13151 {
13152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
13153 if (pVmcsInfo->RealMode.fRealOnV86Active)
13154 {
13155 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
13156 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
13157 Assert(CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx));
13158
13159 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
13160 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13161 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
13162 AssertRCReturn(rc, rc);
13163 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
13164 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
13165 0 /* GCPtrFaultAddress */);
13166 rcStrict = VINF_SUCCESS;
13167 }
13168 else
13169 {
13170 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
13171 pVCpu->hm.s.u32HMError = uVector;
13172 rcStrict = VERR_VMX_UNEXPECTED_EXCEPTION;
13173 }
13174 break;
13175 }
13176 }
13177 break;
13178 }
13179
13180 default:
13181 {
13182 pVCpu->hm.s.u32HMError = uExitIntInfo;
13183 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
13184 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INT_INFO_TYPE(uExitIntInfo)));
13185 break;
13186 }
13187 }
13188 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
13189 return rcStrict;
13190}
13191
13192
13193/**
13194 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
13195 */
13196HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13197{
13198 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13199
13200 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
13201 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13202 int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
13203 AssertRCReturn(rc, rc);
13204
13205 /* Evaluate and deliver pending events and resume guest execution. */
13206 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
13207 return VINF_SUCCESS;
13208}
13209
13210
13211/**
13212 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
13213 */
13214HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13215{
13216 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13217
13218 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13219 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
13220 {
13221 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
13222 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
13223 }
13224
13225 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));
13226
13227 /*
13228 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
13229 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
13230 */
13231 uint32_t fIntrState;
13232 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
13233 AssertRCReturn(rc, rc);
13234 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
13235 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
13236 {
13237 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
13238 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
13239
13240 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
13241 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
13242 AssertRCReturn(rc, rc);
13243 }
13244
13245 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
13246 rc = hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
13247 AssertRCReturn(rc, rc);
13248
13249 /* Evaluate and deliver pending events and resume guest execution. */
13250 return VINF_SUCCESS;
13251}
13252
13253
13254/**
13255 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
13256 */
13257HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13258{
13259 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13260 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13261}
13262
13263
13264/**
13265 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
13266 */
13267HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13268{
13269 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13270 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13271}
13272
13273
13274/**
13275 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
13276 */
13277HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13278{
13279 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13280
13281 /*
13282 * Get the state we need and update the exit history entry.
13283 */
13284 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13285 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13286 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
13287 AssertRCReturn(rc, rc);
13288
13289 VBOXSTRICTRC rcStrict;
13290 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
13291 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
13292 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
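    /* A non-NULL exit record means EM considers this exit frequent enough to probe/optimize
       via EMHistoryExec; otherwise handle the CPUID the regular way. */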
13293 if (!pExitRec)
13294 {
13295 /*
13296 * Regular CPUID instruction execution.
13297 */
13298 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbInstr);
13299 if (rcStrict == VINF_SUCCESS)
13300 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13301 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13302 {
13303 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13304 rcStrict = VINF_SUCCESS;
13305 }
13306 }
13307 else
13308 {
13309 /*
13310 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
13311 */
13312 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13313 AssertRCReturn(rc2, rc2);
13314
13315 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
13316 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
13317
13318 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
13319 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13320
13321 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
13322 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
13323 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
13324 }
13325 return rcStrict;
13326}
13327
13328
13329/**
13330 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
13331 */
13332HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13333{
13334 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13335
13336 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13337 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
13338 AssertRCReturn(rc, rc);
13339
13340 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
13341 return VINF_EM_RAW_EMULATE_INSTR;
13342
13343 AssertMsgFailed(("hmR0VmxExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
13344 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
13345}
13346
13347
13348/**
13349 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
13350 */
13351HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13352{
13353 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13354
13355 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13356 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
13357 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13358 AssertRCReturn(rc, rc);
13359
13360 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr);
13361 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13362 {
13363 /* If we get a spurious VM-exit when TSC offsetting is enabled,
13364 we must reset offsetting on VM-entry. See @bugref{6634}. */
13365 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
13366 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13367 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13368 }
13369 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13370 {
13371 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13372 rcStrict = VINF_SUCCESS;
13373 }
13374 return rcStrict;
13375}
13376
13377
13378/**
13379 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
13380 */
13381HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13382{
13383 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13384
13385 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13386 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
13387 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13388 AssertRCReturn(rc, rc);
13389
13390 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr);
13391 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13392 {
13393 /* If we get a spurious VM-exit when TSC offsetting is enabled,
13394 we must reset offsetting on VM-reentry. See @bugref{6634}. */
13395 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
13396 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13397 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13398 }
13399 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13400 {
13401 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13402 rcStrict = VINF_SUCCESS;
13403 }
13404 return rcStrict;
13405}
13406
13407
13408/**
13409 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
13410 */
13411HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13412{
13413 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13414
13415 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13416 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
13417 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
13418 AssertRCReturn(rc, rc);
13419
13420 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13421 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
13422 if (RT_LIKELY(rc == VINF_SUCCESS))
13423 {
13424 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13425 Assert(pVmxTransient->cbInstr == 2);
13426 }
13427 else
13428 {
13429 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
13430 rc = VERR_EM_INTERPRETER;
13431 }
13432 return rc;
13433}
13434
13435
13436/**
13437 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
13438 */
13439HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13440{
13441 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13442
13443 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
13444 if (EMAreHypercallInstructionsEnabled(pVCpu))
13445 {
13446 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13447 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
13448 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
13449 AssertRCReturn(rc, rc);
13450
13451 /* Perform the hypercall. */
13452 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
13453 if (rcStrict == VINF_SUCCESS)
13454 {
13455 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13456 AssertRCReturn(rc, rc);
13457 }
13458 else
13459 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
13460 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
13461 || RT_FAILURE(rcStrict));
13462
13463 /* If the hypercall changes anything other than the guest's general-purpose registers,
13464 we would need to reload the guest changed bits here before VM-entry. */
13465 }
13466 else
13467 Log4Func(("Hypercalls not enabled\n"));
13468
13469 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
13470 if (RT_FAILURE(rcStrict))
13471 {
13472 hmR0VmxSetPendingXcptUD(pVCpu);
13473 rcStrict = VINF_SUCCESS;
13474 }
13475
13476 return rcStrict;
13477}
13478
13479
13480/**
13481 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
13482 */
13483HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13484{
13485 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13486 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
13487
13488 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13489 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13490 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13491 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13492 AssertRCReturn(rc, rc);
13493
13494 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual);
13495
13496 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
13497 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13498 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13499 {
13500 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13501 rcStrict = VINF_SUCCESS;
13502 }
13503 else
13504 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
13505 VBOXSTRICTRC_VAL(rcStrict)));
13506 return rcStrict;
13507}
13508
13509
13510/**
13511 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
13512 */
13513HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13514{
13515 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13516
13517 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13518 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
13519 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13520 AssertRCReturn(rc, rc);
13521
13522 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbInstr);
13523 if (rcStrict == VINF_SUCCESS)
13524 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13525 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13526 {
13527 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13528 rcStrict = VINF_SUCCESS;
13529 }
13530
13531 return rcStrict;
13532}
13533
13534
13535/**
13536 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
13537 */
13538HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13539{
13540 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13541
13542 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13543 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
13544 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13545 AssertRCReturn(rc, rc);
13546
13547 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbInstr);
13548 if (RT_SUCCESS(rcStrict))
13549 {
13550 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13551 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
13552 rcStrict = VINF_SUCCESS;
13553 }
13554
13555 return rcStrict;
13556}
13557
13558
13559/**
13560 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
13561 * VM-exit.
13562 */
13563HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13564{
13565 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13566 return VINF_EM_RESET;
13567}
13568
13569
13570/**
13571 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
13572 */
13573HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13574{
13575 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13576
13577 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13578 AssertRCReturn(rc, rc);
13579
13580 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
13581 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
13582 rc = VINF_SUCCESS;
13583 else
13584 rc = VINF_EM_HALT;
13585
13586 if (rc != VINF_SUCCESS)
13587 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
13588 return rc;
13589}
13590
13591
13592/**
13593 * VM-exit handler for instructions that result in a \#UD exception delivered to
13594 * the guest.
13595 */
13596HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13597{
13598 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13599 hmR0VmxSetPendingXcptUD(pVCpu);
13600 return VINF_SUCCESS;
13601}
13602
13603
13604/**
13605 * VM-exit handler for expiry of the VMX-preemption timer.
13606 */
13607HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13608{
13609 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13610
13611 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
13612 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13613
13614 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
13615 PVM pVM = pVCpu->CTX_SUFF(pVM);
13616 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
13617 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
13618 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
13619}
13620
13621
13622/**
13623 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
13624 */
13625HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13626{
13627 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13628
13629 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13630 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13631 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
13632 AssertRCReturn(rc, rc);
13633
13634 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
13635 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
13636 : HM_CHANGED_RAISED_XCPT_MASK);
13637
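    /* The guest may have changed XCR0 via XSETBV; re-evaluate whether we need to manually
       load/restore XCR0 around guest execution. */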
13638 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13639 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
13640
13641 return rcStrict;
13642}
13643
13644
13645/**
13646 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
13647 */
13648HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13649{
13650 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13651 /** @todo Use VM-exit instruction information. */
13652 return VERR_EM_INTERPRETER;
13653}
13654
13655
13656/**
13657 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
13658 * Error VM-exit.
13659 */
13660HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13661{
13662 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13663 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13664 AssertRCReturn(rc, rc);
13665
13666 rc = hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo);
13667 if (RT_FAILURE(rc))
13668 return rc;
13669
13670 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
13671 NOREF(uInvalidReason);
13672
13673#ifdef VBOX_STRICT
13674 uint32_t fIntrState;
13675 RTHCUINTREG uHCReg;
13676 uint64_t u64Val;
13677 uint32_t u32Val;
13678 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
13679 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
13680 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
13681 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
13682 AssertRCReturn(rc, rc);
13683
13684 Log4(("uInvalidReason %u\n", uInvalidReason));
13685 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
13686 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
13687 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
13688 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
13689
13690 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
13691 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
13692 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
13693 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
13694 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
13695 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
13696 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
13697 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
13698 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
13699 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
13700 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
13701 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
13702
13703 hmR0DumpRegs(pVCpu);
13704#endif
13705
13706 return VERR_VMX_INVALID_GUEST_STATE;
13707}
13708
13709/**
13710 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
13711 */
13712HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUnexpected(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13713{
13714 /*
13715 * Cumulative notes of all recognized but unexpected VM-exits.
13716 *
13717 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
13718 * nested-paging is used.
13719 *
13720 * 2. Any instruction that causes a VM-exit unconditionally (e.g. VMXON) must be
13721 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
13722 * this function (and thereby stop VM execution) for handling such instructions.
13723 *
13724 *
13725 * VMX_EXIT_INIT_SIGNAL:
13726 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
13727 * They are -NOT- blocked in VMX non-root operation so we can, in theory, still get these
13728 * VM-exits. However, we should not see INIT-signal VM-exits while executing a VM.
13729 *
13730 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
13731 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
13732 * See Intel spec. 23.8 "Restrictions on VMX Operation".
13733 *
13734 * VMX_EXIT_SIPI:
13735 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
13736 * activity state is used. We don't make use of it as our guests don't have direct
13737 * access to the host local APIC.
13738 *
13739 * See Intel spec. 25.3 "Other Causes of VM-exits".
13740 *
13741 * VMX_EXIT_IO_SMI:
13742 * VMX_EXIT_SMI:
13743 * This can only happen if we support dual-monitor treatment of SMI, which can be
13744 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
13745 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
13746 * VMX root mode or receive an SMI. If we get here, something funny is going on.
13747 *
13748 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
13749 * See Intel spec. 25.3 "Other Causes of VM-Exits"
13750 *
13751 * VMX_EXIT_ERR_MSR_LOAD:
13752 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
13753 * and typically indicate a bug in the hypervisor code. We thus cannot resume
13754 * execution.
13755 *
13756 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
13757 *
13758 * VMX_EXIT_ERR_MACHINE_CHECK:
13759 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
13760 * including but not limited to system bus, ECC, parity, cache and TLB errors. A
13761 * #MC (abort-class) exception is raised. We thus cannot assume a
13762 * reasonable chance of continuing any sort of execution and we bail.
13763 *
13764 * See Intel spec. 15.1 "Machine-check Architecture".
13765 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
13766 *
13767 * VMX_EXIT_PML_FULL:
13768 * VMX_EXIT_VIRTUALIZED_EOI:
13769 * VMX_EXIT_APIC_WRITE:
13770 * We do not currently support any of these features and thus they are all unexpected
13771 * VM-exits.
13772 *
13773 * VMX_EXIT_GDTR_IDTR_ACCESS:
13774 * VMX_EXIT_LDTR_TR_ACCESS:
13775 * VMX_EXIT_RDRAND:
13776 * VMX_EXIT_RSM:
13777 * VMX_EXIT_VMFUNC:
13778 * VMX_EXIT_ENCLS:
13779 * VMX_EXIT_RDSEED:
13780 * VMX_EXIT_XSAVES:
13781 * VMX_EXIT_XRSTORS:
13782 * VMX_EXIT_UMWAIT:
13783 * VMX_EXIT_TPAUSE:
13784 * These VM-exits are -not- caused unconditionally by execution of the corresponding
13785 * instruction. Any VM-exit for these instructions indicates a hardware problem,
13786 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
13787 *
13788 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
13789 */
13790 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13791 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
13792 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
13793}
13794
13795
13796/**
13797 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
13798 */
13799HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13800{
13801 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13802
13803 /** @todo Optimize this: We currently drag in the whole MSR state
13804 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to fetch only the
13805 * MSRs required. That would require changes to IEM and possibly CPUM too.
13806 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
13807 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13808 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
13809 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
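    /* The FS and GS base MSRs are not part of the all-MSRs mask above; when they are being
       read, import the full segment registers (see the matching note in the WRMSR handler). */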
13810 switch (idMsr)
13811 {
13812 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
13813 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
13814 }
13815
13816 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13817 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
13818 AssertRCReturn(rc, rc);
13819
13820 Log4Func(("ecx=%#RX32\n", idMsr));
13821
13822#ifdef VBOX_STRICT
13823 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
13824 {
13825 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
13826 && idMsr != MSR_K6_EFER)
13827 {
13828 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
13829 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
13830 }
13831 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
13832 {
13833 Assert(pVmcsInfo->pvMsrBitmap);
13834 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
13835 if (fMsrpm & VMXMSRPM_ALLOW_RD)
13836 {
13837 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
13838 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
13839 }
13840 }
13841 }
13842#endif
13843
13844 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbInstr);
13845 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
13846 if (rcStrict == VINF_SUCCESS)
13847 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
13848 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
13849 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13850 {
13851 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13852 rcStrict = VINF_SUCCESS;
13853 }
13854 else
13855 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
13856
13857 return rcStrict;
13858}
13859
13860
13861/**
13862 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
13863 */
13864HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13865{
13866 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13867
13868 /** @todo Optimize this: We currently drag in the whole MSR state
13869 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to fetch only the
13870 * MSRs required. That would require changes to IEM and possibly CPUM too.
13871 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
13872 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
13873 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
13874
13875 /*
13876 * The FS and GS base MSRs are not part of the above all-MSRs mask.
13877 * Although we don't need to fetch the base (it will be overwritten shortly), when
13878 * loading the guest state we also load the entire segment register, including the limit
13879 * and attributes, and thus we need to import them here.
13880 */
13881 switch (idMsr)
13882 {
13883 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
13884 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
13885 }
13886
13887 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13888 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13889 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
13890 AssertRCReturn(rc, rc);
13891
13892 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
13893
13894 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
13895 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
13896
13897 if (rcStrict == VINF_SUCCESS)
13898 {
13899 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13900
13901 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
13902 if ( idMsr == MSR_IA32_APICBASE
13903 || ( idMsr >= MSR_IA32_X2APIC_START
13904 && idMsr <= MSR_IA32_X2APIC_END))
13905 {
13906 /*
13907 * We've already saved the APIC related guest-state (TPR) in post-run phase.
13908 * When full APIC register virtualization is implemented we'll have to make
13909 * sure APIC state is saved from the VMCS before IEM changes it.
13910 */
13911 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
13912 }
13913 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
13914 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13915 else if (idMsr == MSR_K6_EFER)
13916 {
13917 /*
13918 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
13919 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
13920 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
13921 */
13922 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
13923 }
13924
13925 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
13926 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
13927 {
13928 switch (idMsr)
13929 {
13930 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
13931 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
13932 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
13933 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
13934 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
13935 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
13936 default:
13937 {
13938 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
13939 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
13940 else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
13941 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
13942 break;
13943 }
13944 }
13945 }
13946#ifdef VBOX_STRICT
13947 else
13948 {
13949 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
13950 switch (idMsr)
13951 {
13952 case MSR_IA32_SYSENTER_CS:
13953 case MSR_IA32_SYSENTER_EIP:
13954 case MSR_IA32_SYSENTER_ESP:
13955 case MSR_K8_FS_BASE:
13956 case MSR_K8_GS_BASE:
13957 {
13958 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
13959 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
13960 }
13961
13962 /* Writes to MSRs in the auto-load/store area or to lazily swapped MSRs shouldn't cause VM-exits when MSR bitmaps are used. */
13963 default:
13964 {
13965 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
13966 {
13967 /* EFER MSR writes are always intercepted. */
13968 if (idMsr != MSR_K6_EFER)
13969 {
13970 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
13971 idMsr));
13972 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
13973 }
13974 }
13975
13976 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
13977 {
13978 Assert(pVmcsInfo->pvMsrBitmap);
13979 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
13980 if (fMsrpm & VMXMSRPM_ALLOW_WR)
13981 {
13982 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
13983 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
13984 }
13985 }
13986 break;
13987 }
13988 }
13989 }
13990#endif /* VBOX_STRICT */
13991 }
13992 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13993 {
13994 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13995 rcStrict = VINF_SUCCESS;
13996 }
13997 else
13998 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
13999
14000 return rcStrict;
14001}
14002
14003
14004/**
14005 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
14006 */
14007HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14008{
14009 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14010
14011 /** @todo The guest has likely hit a contended spinlock. We might want to
14012 * poke or schedule a different guest VCPU. */
14013 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14014 if (RT_SUCCESS(rc))
14015 return VINF_EM_RAW_INTERRUPT;
14016
14017 AssertMsgFailed(("hmR0VmxExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
14018 return rc;
14019}
14020
14021
14022/**
14023 * VM-exit handler for when the TPR value is lowered below the specified
14024 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
14025 */
14026HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14027{
14028 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14029 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
14030
14031 /*
14032 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
14033 * We'll re-evaluate pending interrupts and inject them before the next VM
14034 * entry so we can just continue execution here.
14035 */
14036 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
14037 return VINF_SUCCESS;
14038}
14039
14040
14041/**
14042 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
14043 * VM-exit.
14044 *
14045 * @retval VINF_SUCCESS when guest execution can continue.
14046 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
14047 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
14048 * incompatible guest state for VMX execution (real-on-v86 case).
14049 */
14050HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14051{
14052 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14053 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
14054
14055 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14056 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14057 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14058 AssertRCReturn(rc, rc);
14059
14060 VBOXSTRICTRC rcStrict;
14061 PVM pVM = pVCpu->CTX_SUFF(pVM);
14062 uint64_t const uExitQual = pVmxTransient->uExitQual;
14063 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
14064 switch (uAccessType)
14065 {
14066 /*
14067 * MOV to CRx.
14068 */
14069 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
14070 {
14071 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14072 AssertRCReturn(rc, rc);
14073
14074 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
14075 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
14076 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
14077 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
14078
14079 /*
14080 * MOV to CR3 only causes a VM-exit when one or more of the following are true:
14081 * - When nested paging isn't used.
14082 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
14083 * - We are executing in the VM debug loop.
14084 */
14085 Assert( iCrReg != 3
14086 || !pVM->hm.s.fNestedPaging
14087 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
14088 || pVCpu->hm.s.fUsingDebugLoop);
14089
14090 /* MOV to CR8 writes only cause VM-exits when TPR shadow is not used. */
14091 Assert( iCrReg != 8
14092 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
14093
14094 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg);
14095 AssertMsg( rcStrict == VINF_SUCCESS
14096 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14097
14098 /*
14099 * This is a kludge for handling switches back to real mode when we try to use
14100 * V86 mode to run real mode code directly. The problem is that V86 mode cannot
14101 * deal with special selector values, so we have to return to ring-3 and run
14102 * there till the selector values are V86 mode compatible.
14103 *
14104 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
14105 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
14106 * this function.
14107 */
14108 if ( iCrReg == 0
14109 && rcStrict == VINF_SUCCESS
14110 && !pVM->hm.s.vmx.fUnrestrictedGuest
14111 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
14112 && (uOldCr0 & X86_CR0_PE)
14113 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
14114 {
14115 /** @todo Check selectors rather than returning all the time. */
14116 Assert(!pVmxTransient->fIsNestedGuest);
14117 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
14118 rcStrict = VINF_EM_RESCHEDULE_REM;
14119 }
14120 break;
14121 }
14122
14123 /*
14124 * MOV from CRx.
14125 */
14126 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
14127 {
14128 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
14129 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
14130
14131 /*
14132 * MOV from CR3 only causes a VM-exit when one or more of the following are true:
14133 * - When nested paging isn't used.
14134 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
14135 * - We are executing in the VM debug loop.
14136 */
14137 Assert( iCrReg != 3
14138 || !pVM->hm.s.fNestedPaging
14139 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
14140 || pVCpu->hm.s.fUsingDebugLoop);
14141
14142 /* MOV from CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
14143 Assert( iCrReg != 8
14144 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
14145
14146 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg);
14147 break;
14148 }
14149
14150 /*
14151 * CLTS (Clear Task-Switch Flag in CR0).
14152 */
14153 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
14154 {
14155 rcStrict = hmR0VmxExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbInstr);
14156 break;
14157 }
14158
14159 /*
14160 * LMSW (Load Machine-Status Word into CR0).
14161 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
14162 */
14163 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
14164 {
14165 RTGCPTR GCPtrEffDst;
14166 uint8_t const cbInstr = pVmxTransient->cbInstr;
14167 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
14168 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
14169 if (fMemOperand)
14170 {
14171 rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
14172 AssertRCReturn(rc, rc);
14173 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
14174 }
14175 else
14176 GCPtrEffDst = NIL_RTGCPTR;
14177 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
14178 break;
14179 }
14180
14181 default:
14182 {
14183 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
14184 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
14185 }
14186 }
14187
14188 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
14189 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
14190 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
14191
14192 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
14193 NOREF(pVM);
14194 return rcStrict;
14195}
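
/*
 * Illustrative sketch, not part of the original source: a worked decode of a sample
 * MOV-CRx exit qualification using the macros from the handler above. The sample value
 * 0x304 and the helper name are assumptions based on the Intel SDM field layout for
 * control-register-access exit qualifications (CR number in bits 3:0, access type in
 * bits 5:4, general-purpose register in bits 11:8).
 */
#if 0 /* example only */
static void hmR0VmxCrxQualDecodeSketch(void)
{
    /* A guest executing "mov cr4, rbx" would produce an exit qualification of 0x304. */
    uint64_t const uExitQualSample = UINT64_C(0x304);
    Assert(VMX_EXIT_QUAL_CRX_ACCESS(uExitQualSample)   == VMX_EXIT_QUAL_CRX_ACCESS_WRITE);  /* MOV to CRx. */
    Assert(VMX_EXIT_QUAL_CRX_REGISTER(uExitQualSample) == 4);                               /* CR4. */
    Assert(VMX_EXIT_QUAL_CRX_GENREG(uExitQualSample)   == 3);                               /* RBX. */
}
#endif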
14196
14197
14198/**
14199 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
14200 * VM-exit.
14201 */
14202HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14203{
14204 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14205 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
14206
14207 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14208 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14209 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14210 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14211 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
14212 | CPUMCTX_EXTRN_EFER);
14213 /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
14214 AssertRCReturn(rc, rc);
14215
14216 /* Refer to Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
14217 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
14218 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
14219 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
14220 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
14221 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
14222 bool const fDbgStepping = pVCpu->hm.s.fSingleInstruction;
14223 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
14224
14225 /*
14226 * Update exit history to see if this exit can be optimized.
14227 */
14228 VBOXSTRICTRC rcStrict;
14229 PCEMEXITREC pExitRec = NULL;
14230 if ( !fGstStepping
14231 && !fDbgStepping)
14232 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
14233 !fIOString
14234 ? !fIOWrite
14235 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
14236 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
14237 : !fIOWrite
14238 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
14239 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
14240 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
14241 if (!pExitRec)
14242 {
14243 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
14244 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
14245
14246 uint32_t const cbValue = s_aIOSizes[uIOSize];
14247 uint32_t const cbInstr = pVmxTransient->cbInstr;
14248 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
14249 PVM pVM = pVCpu->CTX_SUFF(pVM);
14250 if (fIOString)
14251 {
14252 /*
14253 * INS/OUTS - I/O String instruction.
14254 *
14255 * Use instruction-information if available, otherwise fall back on
14256 * interpreting the instruction.
14257 */
14258 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
14259 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
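            /* Check whether the CPU provides instruction-information for INS/OUTS VM-exits, as reported in the VMX basic MSR. */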
14260 bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
14261 if (fInsOutsInfo)
14262 {
14263 int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
14264 AssertRCReturn(rc2, rc2);
14265 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
14266 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
14267 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
14268 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
14269 if (fIOWrite)
14270 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
14271 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
14272 else
14273 {
14274 /*
14275 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
14276 * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
14277 * See Intel Instruction spec. for "INS".
14278 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
14279 */
14280 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
14281 }
14282 }
14283 else
14284 rcStrict = IEMExecOne(pVCpu);
14285
14286 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
14287 fUpdateRipAlready = true;
14288 }
14289 else
14290 {
14291 /*
14292 * IN/OUT - I/O instruction.
14293 */
14294 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
14295 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
14296 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
14297 if (fIOWrite)
14298 {
14299 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
14300 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
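                /* If the write has to be completed in ring-3, record it as pending with EM unless the guest is single-stepping (TF). */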
14301 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
14302 && !pCtx->eflags.Bits.u1TF)
14303 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
14304 }
14305 else
14306 {
14307 uint32_t u32Result = 0;
14308 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
14309 if (IOM_SUCCESS(rcStrict))
14310 {
14311 /* Save result of I/O IN instr. in AL/AX/EAX. */
14312 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
14313 }
14314 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
14315 && !pCtx->eflags.Bits.u1TF)
14316 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
14317 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
14318 }
14319 }
14320
14321 if (IOM_SUCCESS(rcStrict))
14322 {
14323 if (!fUpdateRipAlready)
14324 {
14325 hmR0VmxAdvanceGuestRipBy(pVCpu, cbInstr);
14326 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
14327 }
14328
14329 /*
14330 * INS/OUTS with a REP prefix updates RFLAGS; failing to mark it as changed can be observed
14331 * as a triple-fault guru meditation while booting a Fedora 17 64-bit guest.
14332 *
14333 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
14334 */
14335 if (fIOString)
14336 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
14337
14338 /*
14339 * If any I/O breakpoints are armed, we need to check if one triggered
14340 * and take appropriate action.
14341 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
14342 */
14343 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
14344 AssertRCReturn(rc, rc);
14345
14346 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
14347 * execution engines about whether hyper BPs and such are pending. */
14348 uint32_t const uDr7 = pCtx->dr[7];
14349 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
14350 && X86_DR7_ANY_RW_IO(uDr7)
14351 && (pCtx->cr4 & X86_CR4_DE))
14352 || DBGFBpIsHwIoArmed(pVM)))
14353 {
14354 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
14355
14356 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
14357 VMMRZCallRing3Disable(pVCpu);
14358 HM_DISABLE_PREEMPT(pVCpu);
14359
14360 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
14361
14362 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
14363 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
14364 {
14365 /* Raise #DB. */
14366 if (fIsGuestDbgActive)
14367 ASMSetDR6(pCtx->dr[6]);
14368 if (pCtx->dr[7] != uDr7)
14369 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
14370
14371 hmR0VmxSetPendingXcptDB(pVCpu);
14372 }
14373 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
14374 however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
14375 else if ( rcStrict2 != VINF_SUCCESS
14376 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
14377 rcStrict = rcStrict2;
14378 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
14379
14380 HM_RESTORE_PREEMPT();
14381 VMMRZCallRing3Enable(pVCpu);
14382 }
14383 }
14384
14385#ifdef VBOX_STRICT
14386 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
14387 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
14388 Assert(!fIOWrite);
14389 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
14390 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14391 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
14392 Assert(fIOWrite);
14393 else
14394 {
14395# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
14396 * statuses, that the VMM device and some others may return. See
14397 * IOM_SUCCESS() for guidance. */
14398 AssertMsg( RT_FAILURE(rcStrict)
14399 || rcStrict == VINF_SUCCESS
14400 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14401 || rcStrict == VINF_EM_DBG_BREAKPOINT
14402 || rcStrict == VINF_EM_RAW_GUEST_TRAP
14403 || rcStrict == VINF_EM_RAW_TO_R3
14404 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14405# endif
14406 }
14407#endif
14408 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
14409 }
14410 else
14411 {
14412 /*
14413 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
14414 */
14415 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14416 AssertRCReturn(rc2, rc2);
14417 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
14418 : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
14419 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
14420 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14421 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
14422 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
14423
14424 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
14425 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14426
14427 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
14428 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14429 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
14430 }
14431 return rcStrict;
14432}
14433
14434
14435/**
14436 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
14437 * VM-exit.
14438 */
14439HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14440{
14441 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14442
14443 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
14444 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14445 AssertRCReturn(rc, rc);
14446 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
14447 {
14448 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
14449 AssertRCReturn(rc, rc);
14450 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
14451 {
14452 uint32_t uErrCode;
14453 RTGCUINTPTR GCPtrFaultAddress;
14454 uint32_t const uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
14455 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
14456 bool const fErrorCodeValid = VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo);
14457 if (fErrorCodeValid)
14458 {
14459 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
14460 AssertRCReturn(rc, rc);
14461 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
14462 }
14463 else
14464 uErrCode = 0;
14465
14466 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
14467 && uVector == X86_XCPT_PF)
14468 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
14469 else
14470 GCPtrFaultAddress = 0;
14471
14472 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14473 AssertRCReturn(rc, rc);
14474
14475 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
14476 pVmxTransient->cbInstr, uErrCode, GCPtrFaultAddress);
14477
14478 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector));
14479 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
14480 return VINF_EM_RAW_INJECT_TRPM_EVENT;
14481 }
14482 }
14483
14484 /* Fall back to the interpreter to emulate the task-switch. */
14485 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
14486 return VERR_EM_INTERPRETER;
14487}
14488
14489
14490/**
14491 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
14492 */
14493HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14494{
14495 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14496
14497 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
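    /* The monitor-trap flag is armed for single-stepping the guest; disarm it and report the completed step to the debugger. */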
14498 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
14499 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
14500 AssertRCReturn(rc, rc);
14501 return VINF_EM_DBG_STEPPED;
14502}
14503
14504
14505/**
14506 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
14507 */
14508HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14509{
14510 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14511 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
14512
14513 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
14514 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
14515 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
14516 {
14517 /* For some crazy guests, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
14518 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
14519 {
14520 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
14521 return VINF_EM_RAW_INJECT_TRPM_EVENT;
14522 }
14523 }
14524 else
14525 {
14526 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
14527 rcStrict1 = VINF_SUCCESS;
14528 return rcStrict1;
14529 }
14530
14531 /* IOMMMIOPhysHandler() below may call into IEM, save the necessary state. */
14532 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14533 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14534 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14535 AssertRCReturn(rc, rc);
14536
14537 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
14538 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
14539 VBOXSTRICTRC rcStrict2;
14540 switch (uAccessType)
14541 {
14542 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
14543 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
14544 {
14545 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
14546 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
14547 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
14548
14549 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
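            /* Compute the guest-physical address that was accessed: the guest's APIC base page plus the offset from the exit qualification. */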
14550 GCPhys &= PAGE_BASE_GC_MASK;
14551 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
14552 PVM pVM = pVCpu->CTX_SUFF(pVM);
14553 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
14554 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
14555
14556 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14557 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
14558 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
14559 CPUMCTX2CORE(pCtx), GCPhys);
14560 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
14561 if ( rcStrict2 == VINF_SUCCESS
14562 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
14563 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
14564 {
14565 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
14566 | HM_CHANGED_GUEST_APIC_TPR);
14567 rcStrict2 = VINF_SUCCESS;
14568 }
14569 break;
14570 }
14571
14572 default:
14573 Log4Func(("uAccessType=%#x\n", uAccessType));
14574 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR;
14575 break;
14576 }
14577
14578 if (rcStrict2 != VINF_SUCCESS)
14579 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
14580 return rcStrict2;
14581}
14582
14583
14584/**
14585 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
14586 * VM-exit.
14587 */
14588HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14589{
14590 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14591
14592 /* We should -not- get this VM-exit if the guest's debug registers were active. */
14593 if (pVmxTransient->fWasGuestDebugStateActive)
14594 {
14595 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
14596 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
14597 }
14598
14599 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14600 if ( !pVCpu->hm.s.fSingleInstruction
14601 && !pVmxTransient->fWasHyperDebugStateActive)
14602 {
14603 Assert(!DBGFIsStepping(pVCpu));
14604 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
14605
14606 /* Don't intercept MOV DRx any more. */
14607 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
14608 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
14609 AssertRCReturn(rc, rc);
14610
14611 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
14612 VMMRZCallRing3Disable(pVCpu);
14613 HM_DISABLE_PREEMPT(pVCpu);
14614
14615 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
14616 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
14617 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
14618
14619 HM_RESTORE_PREEMPT();
14620 VMMRZCallRing3Enable(pVCpu);
14621
14622#ifdef VBOX_WITH_STATISTICS
14623 rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14624 AssertRCReturn(rc, rc);
14625 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
14626 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
14627 else
14628 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
14629#endif
14630 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
14631 return VINF_SUCCESS;
14632 }
14633
14634 /*
14635 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
14636 * The EFER MSR is always up-to-date.
14637 * Update the segment registers and DR7 from the CPU.
14638 */
14639 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14640 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14641 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
14642 AssertRCReturn(rc, rc);
14643 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
14644
14645 PVM pVM = pVCpu->CTX_SUFF(pVM);
14646 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
14647 {
14648 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
14649 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
14650 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
14651 if (RT_SUCCESS(rc))
14652 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
14653 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
14654 }
14655 else
14656 {
14657 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
14658 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
14659 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
14660 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
14661 }
14662
14663 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
14664 if (RT_SUCCESS(rc))
14665 {
14666 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14667 AssertRCReturn(rc2, rc2);
14668 return VINF_SUCCESS;
14669 }
14670 return rc;
14671}
14672
14673
14674/**
14675 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
14676 * Conditional VM-exit.
14677 */
14678HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14679{
14680 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14681 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
14682
14683 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
14684 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
14685 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
14686 {
14687 /* If event delivery causes an EPT misconfig (MMIO), go back to instruction emulation as otherwise
14688 injecting the original pending event would most likely cause the same EPT misconfig VM-exit. */
14689 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
14690 {
14691 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
14692 return VINF_EM_RAW_INJECT_TRPM_EVENT;
14693 }
14694 }
14695 else
14696 {
14697 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
14698 rcStrict1 = VINF_SUCCESS;
14699 return rcStrict1;
14700 }
14701
14702 /*
14703 * Get sufficient state and update the exit history entry.
14704 */
14705 RTGCPHYS GCPhys;
14706 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14707 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
14708 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14709 AssertRCReturn(rc, rc);
14710
14711 VBOXSTRICTRC rcStrict;
14712 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
14713 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
14714 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
14715 if (!pExitRec)
14716 {
14717 /*
14718 * If we succeed, resume guest execution.
14719 * If we fail to interpret the instruction because we couldn't get the guest-physical address
14720 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
14721 * in the host TLB), we also resume execution; the resulting guest page fault lets the guest handle this
14722 * weird case itself. See @bugref{6043}.
14723 */
14724 PVM pVM = pVCpu->CTX_SUFF(pVM);
14725 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14726 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
14727 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
14728 if ( rcStrict == VINF_SUCCESS
14729 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
14730 || rcStrict == VERR_PAGE_NOT_PRESENT)
14731 {
14732 /* Successfully handled MMIO operation. */
14733 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
14734 | HM_CHANGED_GUEST_APIC_TPR);
14735 rcStrict = VINF_SUCCESS;
14736 }
14737 }
14738 else
14739 {
14740 /*
14741 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
14742 */
14743 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14744 AssertRCReturn(rc2, rc2);
14745
14746 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
14747 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
14748
14749 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
14750 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14751
14752 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
14753 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14754 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
14755 }
14756 return VBOXSTRICTRC_TODO(rcStrict);
14757}
14758
14759
14760/**
14761 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
14762 * VM-exit.
14763 */
14764HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14765{
14766 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14767 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
14768
14769 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
14770 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
14771 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
14772 {
14773 /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. */
14774 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
14775 Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));
14776 }
14777 else
14778 {
14779 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
14780 rcStrict1 = VINF_SUCCESS;
14781 return rcStrict1;
14782 }
14783
14784 RTGCPHYS GCPhys;
14785 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14786 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
14787 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14788 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14789 AssertRCReturn(rc, rc);
14790
14791 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
14792 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));
14793
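    /* Translate the EPT-violation exit qualification bits into a #PF-style error code for TRPM/PGM. */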
14794 RTGCUINT uErrorCode = 0;
14795 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
14796 uErrorCode |= X86_TRAP_PF_ID;
14797 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
14798 uErrorCode |= X86_TRAP_PF_RW;
14799 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
14800 uErrorCode |= X86_TRAP_PF_P;
14801
14802 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
14803
14804
14805 /* Handle the pagefault trap for the nested shadow table. */
14806 PVM pVM = pVCpu->CTX_SUFF(pVM);
14807 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14808
14809 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x cs:rip=%#04x:%#RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
14810 pCtx->cs.Sel, pCtx->rip));
14811
14812 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
14813 TRPMResetTrap(pVCpu);
14814
14815 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
14816 if ( rcStrict2 == VINF_SUCCESS
14817 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
14818 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
14819 {
14820 /* Successfully synced our nested page tables. */
14821 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
14822 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
14823 return VINF_SUCCESS;
14824 }
14825
14826 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
14827 return rcStrict2;
14828}
14829
14830/** @} */
14831
14832/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
14833/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
14834/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
14835
14836/**
14837 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
14838 */
14839static VBOXSTRICTRC hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14840{
14841 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14842 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
14843
14844 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
14845 AssertRCReturn(rc, rc);
14846
14847 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
14848 {
14849 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
14850 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
14851
14852 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
14853 * provides VM-exit instruction length. If this causes problems later,
14854 * disassemble the instruction like it's done on AMD-V. */
14855 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14856 AssertRCReturn(rc2, rc2);
14857 return rc;
14858 }
14859
14860 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14861 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14862 return rc;
14863}
14864
14865
14866/**
14867 * VM-exit exception handler for \#BP (Breakpoint exception).
14868 */
14869static VBOXSTRICTRC hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14870{
14871 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14872 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
14873
14874 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14875 AssertRCReturn(rc, rc);
14876
14877 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14878 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
14879 if (rc == VINF_EM_RAW_GUEST_TRAP)
14880 {
14881 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
14882 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14883 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14884 AssertRCReturn(rc, rc);
14885
14886 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14887 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14888 }
14889
14890 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
14891 return rc;
14892}
14893
14894
14895/**
14896 * VM-exit exception handler for \#AC (alignment check exception).
14897 */
14898static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14899{
14900 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14901
14902 /*
14903 * Re-inject it. We'll detect any nesting before getting here.
14904 */
14905 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14906 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14907 AssertRCReturn(rc, rc);
14908 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
14909
14910 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14911 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14912 return VINF_SUCCESS;
14913}
14914
14915
14916/**
14917 * VM-exit exception handler for \#DB (Debug exception).
14918 */
14919static VBOXSTRICTRC hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14920{
14921 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14922 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
14923
14924 /*
14925 * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
14926 * for processing.
14927 */
14928 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14929
14930 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
14931 uint64_t const uDR6 = X86_DR6_INIT_VAL
14932 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
14933 | X86_DR6_BD | X86_DR6_BS));
14934
14935 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14936 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
14937 Log6Func(("rc=%Rrc\n", rc));
14938 if (rc == VINF_EM_RAW_GUEST_TRAP)
14939 {
14940 /*
14941 * The exception was for the guest. Update DR6, DR7.GD and
14942 * IA32_DEBUGCTL.LBR before forwarding it.
14943 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
14944 */
14945 VMMRZCallRing3Disable(pVCpu);
14946 HM_DISABLE_PREEMPT(pVCpu);
14947
14948 pCtx->dr[6] &= ~X86_DR6_B_MASK;
14949 pCtx->dr[6] |= uDR6;
14950 if (CPUMIsGuestDebugStateActive(pVCpu))
14951 ASMSetDR6(pCtx->dr[6]);
14952
14953 HM_RESTORE_PREEMPT();
14954 VMMRZCallRing3Enable(pVCpu);
14955
14956 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
14957 AssertRCReturn(rc, rc);
14958
14959 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
14960 pCtx->dr[7] &= ~X86_DR7_GD;
14961
14962 /* Paranoia. */
14963 pCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
14964 pCtx->dr[7] |= X86_DR7_RA1_MASK;
14965
14966 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pCtx->dr[7]);
14967 AssertRCReturn(rc, rc);
14968
14969 /*
14970 * Raise #DB in the guest.
14971 *
14972 * It is important to reflect exactly what the VM-exit gave us (preserving the
14973 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
14974 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
14975 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
14976 *
14977 * Intel re-documented ICEBP/INT1 in May 2018, having previously documented it only as part of
14978 * the Intel 386; see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
14979 */
14980 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
14981 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14982 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14983 AssertRCReturn(rc, rc);
14984 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14985 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14986 return VINF_SUCCESS;
14987 }
14988
14989 /*
14990 * Not a guest trap, so it must be a hypervisor-related debug event.
14991 * Update DR6 in case someone is interested in it.
14992 */
14993 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
14994 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
14995 CPUMSetHyperDR6(pVCpu, uDR6);
14996
14997 return rc;
14998}
14999
15000
15001/**
15002 * Hacks its way around the lovely mesa driver's backdoor accesses.
15003 *
15004 * @sa hmR0SvmHandleMesaDrvGp.
15005 */
15006static int hmR0VmxHandleMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
15007{
15008 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
15009 RT_NOREF(pCtx);
15010
15011 /* For now we'll just skip the instruction. */
15012 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
15013}
15014
15015
15016/**
15017 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
15018 * backdoor logging w/o checking what it is running inside.
15019 *
15020 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
15021 * backdoor port and magic numbers loaded in registers.
15022 *
15023 * @returns true if it is, false if it isn't.
15024 * @sa hmR0SvmIsMesaDrvGp.
15025 */
15026DECLINLINE(bool) hmR0VmxIsMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
15027{
15028 /* 0xed: IN eAX,dx */
15029 uint8_t abInstr[1];
15030 if (pVmxTransient->cbInstr != sizeof(abInstr))
15031 return false;
15032
15033 /* Check that it is #GP(0). */
15034 if (pVmxTransient->uExitIntErrorCode != 0)
15035 return false;
15036
15037 /* Check magic and port. */
15038 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
15039 /*Log(("hmR0VmxIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
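    /* The magic 'VMXh' (0x564d5868) in eAX and the VMware backdoor I/O port (0x5658) in DX identify the backdoor access. */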
15040 if (pCtx->rax != UINT32_C(0x564d5868))
15041 return false;
15042 if (pCtx->dx != UINT32_C(0x5658))
15043 return false;
15044
15045 /* Flat ring-3 CS. */
15046 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
15047 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
15048 /*Log(("hmR0VmxIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
15049 if (pCtx->cs.Attr.n.u2Dpl != 3)
15050 return false;
15051 if (pCtx->cs.u64Base != 0)
15052 return false;
15053
15054 /* Check opcode. */
15055 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
15056 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
15057 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
15058 /*Log(("hmR0VmxIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
15059 if (RT_FAILURE(rc))
15060 return false;
15061 if (abInstr[0] != 0xed)
15062 return false;
15063
15064 return true;
15065}
15066
15067
15068/**
15069 * VM-exit exception handler for \#GP (General-protection exception).
15070 *
15071 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
15072 */
15073static VBOXSTRICTRC hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15074{
15075 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15076 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
15077
15078 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
15079 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15080 if (pVmcsInfo->RealMode.fRealOnV86Active)
15081 { /* likely */ }
15082 else
15083 {
15084#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
15085 Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv);
15086#endif
15087 /* If the guest is not in real-mode or we have unrestricted guest execution support, reflect #GP to the guest. */
15088 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
15089 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
15090 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15091 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15092 AssertRCReturn(rc, rc);
15093 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
15094 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
15095
15096 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
15097 || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
15098 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
15099 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
15100 else
15101 rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
15102 return rc;
15103 }
15104
15105 Assert(CPUMIsGuestInRealModeEx(pCtx));
15106 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
15107
15108 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15109 AssertRCReturn(rc, rc);
15110
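    /* In real-on-v86 mode we intercept #GP so that instructions the CPU cannot run in that mode can be emulated by IEM. */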
15111 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
15112 if (rcStrict == VINF_SUCCESS)
15113 {
15114 if (!CPUMIsGuestInRealModeEx(pCtx))
15115 {
15116 /*
15117 * The guest is no longer in real-mode, check if we can continue executing the
15118 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
15119 */
15120 pVmcsInfo->RealMode.fRealOnV86Active = false;
15121 if (HMCanExecuteVmxGuest(pVCpu, pCtx))
15122 {
15123 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
15124 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15125 }
15126 else
15127 {
15128 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
15129 rcStrict = VINF_EM_RESCHEDULE;
15130 }
15131 }
15132 else
15133 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15134 }
15135 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15136 {
15137 rcStrict = VINF_SUCCESS;
15138 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15139 }
15140 return VBOXSTRICTRC_VAL(rcStrict);
15141}
15142
15143
15144/**
15145 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
15146 * the exception reported in the VMX transient structure back into the VM.
15147 *
15148 * @remarks Requires uExitIntInfo in the VMX transient structure to be
15149 * up-to-date.
15150 */
15151static VBOXSTRICTRC hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15152{
15153 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15154#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
15155 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15156 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->RealMode.fRealOnV86Active,
15157 ("uVector=%#x u32XcptBitmap=%#X32\n",
15158 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
15159 NOREF(pVmcsInfo);
15160#endif
15161
15162 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
15163 hmR0VmxCheckExitDueToEventDelivery(). */
15164 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
15165 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15166 AssertRCReturn(rc, rc);
15167 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
15168
15169#ifdef DEBUG_ramshankar
15170 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
15171 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n",
15172 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pCtx->cs.Sel, pCtx->rip));
15173#endif
15174
15175 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
15176 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
15177 return VINF_SUCCESS;
15178}
15179
15180
15181/**
15182 * VM-exit exception handler for \#PF (Page-fault exception).
15183 */
15184static VBOXSTRICTRC hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15185{
15186 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15187 PVM pVM = pVCpu->CTX_SUFF(pVM);
15188 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15189 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
15190 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
15191 AssertRCReturn(rc, rc);
15192
15193 if (!pVM->hm.s.fNestedPaging)
15194 { /* likely */ }
15195 else
15196 {
15197#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
15198 Assert(pVCpu->hm.s.fUsingDebugLoop);
15199#endif
15200 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
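        /* With nested paging, guest #PFs are normally not intercepted; when we do see one (debug loop / always-trap),
           simply reflect it back to the guest, or raise #DF if it occurred while delivering a #PF. */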
15201 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
15202 {
15203 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
15204 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
15205 }
15206 else
15207 {
15208 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
15209 hmR0VmxSetPendingXcptDF(pVCpu);
15210 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
15211 }
15212 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
15213 return rc;
15214 }
15215
15216 /* If it's a vectoring #PF, emulate the original event injection as PGMTrap0eHandler() is incapable
15217 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
15218 if (pVmxTransient->fVectoringPF)
15219 {
15220 Assert(pVCpu->hm.s.Event.fPending);
15221 return VINF_EM_RAW_INJECT_TRPM_EVENT;
15222 }
15223
15224 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
15225 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15226 AssertRCReturn(rc, rc);
15227
15228 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
15229 pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
15230
15231 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
15232 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
15233
15234 Log4Func(("#PF: rc=%Rrc\n", rc));
15235 if (rc == VINF_SUCCESS)
15236 {
15237 /*
15238 * This is typically a shadow page table sync or a MMIO instruction. But we may have
15239 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
15240 */
15241 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15242 TRPMResetTrap(pVCpu);
15243 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
15244 return rc;
15245 }
15246
15247 if (rc == VINF_EM_RAW_GUEST_TRAP)
15248 {
15249 if (!pVmxTransient->fVectoringDoublePF)
15250 {
15251 /* It's a guest page fault and needs to be reflected to the guest. */
15252 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
15253 TRPMResetTrap(pVCpu);
15254 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
15255 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
15256 uGstErrorCode, pVmxTransient->uExitQual);
15257 }
15258 else
15259 {
15260 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
15261 TRPMResetTrap(pVCpu);
15262 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
15263 hmR0VmxSetPendingXcptDF(pVCpu);
15264 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
15265 }
15266
15267 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
15268 return VINF_SUCCESS;
15269 }
15270
15271 TRPMResetTrap(pVCpu);
15272 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
15273 return rc;
15274}
15275
15276
15277/**
15278 * VM-exit helper for LMSW.
15279 */
15280static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw,
15281 RTGCPTR GCPtrEffDst)
15282{
15283 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
15284 AssertRCReturn(rc, rc);
15285
15286 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
15287 AssertMsg( rcStrict == VINF_SUCCESS
15288 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15289
15290 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
15291 if (rcStrict == VINF_IEM_RAISED_XCPT)
15292 {
15293 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15294 rcStrict = VINF_SUCCESS;
15295 }
15296
15297 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
15298 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15299 return rcStrict;
15300}
15301
15302
15303/**
15304 * VM-exit helper for CLTS.
15305 */
15306static VBOXSTRICTRC hmR0VmxExitClts(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
15307{
15308 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
15309 AssertRCReturn(rc, rc);
15310
15311 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
15312 AssertMsg( rcStrict == VINF_SUCCESS
15313 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15314
15315 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
15316 if (rcStrict == VINF_IEM_RAISED_XCPT)
15317 {
15318 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15319 rcStrict = VINF_SUCCESS;
15320 }
15321
15322 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
15323 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15324 return rcStrict;
15325}
15326
15327
15328/**
15329 * VM-exit helper for MOV from CRx (CRx read).
15330 */
15331static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15332{
15333 Assert(iCrReg < 16);
15334 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
15335
15336 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
15337 AssertRCReturn(rc, rc);
15338
15339 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
15340 AssertMsg( rcStrict == VINF_SUCCESS
15341 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15342
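    /* If the destination general register was RSP, the guest stack pointer needs re-syncing as well. */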
15343 if (iGReg == X86_GREG_xSP)
15344 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
15345 else
15346 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15347#ifdef VBOX_WITH_STATISTICS
15348 switch (iCrReg)
15349 {
15350 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
15351 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
15352 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
15353 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
15354 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
15355 }
15356#endif
15357 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
15358 return rcStrict;
15359}
15360
15361
15362/**
15363 * VM-exit helper for MOV to CRx (CRx write).
15364 */
15365static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15366{
15367 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
15368 AssertRCReturn(rc, rc);
15369
15370 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
15371 AssertMsg( rcStrict == VINF_SUCCESS
15372 || rcStrict == VINF_IEM_RAISED_XCPT
15373 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15374
15375 switch (iCrReg)
15376 {
15377 case 0:
15378 {
15379 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
15380 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
15381 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
15382 break;
15383 }
15384
15385 case 2:
15386 {
15387 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
15388 /* Nothing to do here; CR2 is not part of the VMCS. */
15389 break;
15390 }
15391
15392 case 3:
15393 {
15394 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
15395 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
15396 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
15397 break;
15398 }
15399
15400 case 4:
15401 {
15402 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
15403 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
15404 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
15405 pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
15406 break;
15407 }
15408
15409 case 8:
15410 {
15411 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
15412 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
15413 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
15414 break;
15415 }
15416
15417 default:
15418 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
15419 break;
15420 }
15421
15422 if (rcStrict == VINF_IEM_RAISED_XCPT)
15423 {
15424 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15425 rcStrict = VINF_SUCCESS;
15426 }
15427 return rcStrict;
15428}
15429
15430
15431/**
15432 * VM-exit helper for handling host NMIs.
15433 */
15434static VBOXSTRICTRC hmR0VmxExitHostNmi(PVMCPU pVCpu)
15435{
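    /* The NMI caused a VM-exit (NMI exiting) without being delivered on the host, so dispatch it to the host now. */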
15436 VMXDispatchHostNmi();
15437
15438 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
15439 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
15440 return VINF_SUCCESS;
15441}
15442
15443
15444#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15445/** @name VMX instruction handlers.
15446 * @{
15447 */
15448/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15449/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VMX instructions VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15450/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15451
15452/**
15453 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
15454 */
15455HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15456{
15457 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15458
15459 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15460 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15461 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15462 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15463 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15464 AssertRCReturn(rc, rc);
15465
15466 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15467
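    /* Gather the VM-exit information and let IEM emulate the VMCLEAR on behalf of the guest hypervisor. */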
15468 VMXVEXITINFO ExitInfo;
15469 RT_ZERO(ExitInfo);
15470 ExitInfo.uReason = pVmxTransient->uExitReason;
15471 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15472 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15473 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15474 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15475
15476 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
15477 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15478 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15479 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15480 {
15481 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15482 rcStrict = VINF_SUCCESS;
15483 }
15484 return rcStrict;
15485}
15486
15487
15488/**
15489 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
15490 */
15491HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15492{
15493 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15494
15495 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
15496 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
15497 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15498 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15499 AssertRCReturn(rc, rc);
15500
15501 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15502
15503 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbInstr, VMXINSTRID_VMLAUNCH);
15504 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15505 {
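        /* VMLAUNCH was emulated successfully; tell the caller so it can switch to executing the nested-guest. */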
15506 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
15507 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15508 }
15509 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
15510 return rcStrict;
15511}
15512
15513
15514/**
15515 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
15516 */
15517HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15518{
15519 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15520
15521 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15522 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15523 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15524 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15525 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15526 AssertRCReturn(rc, rc);
15527
15528 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15529
15530 VMXVEXITINFO ExitInfo;
15531 RT_ZERO(ExitInfo);
15532 ExitInfo.uReason = pVmxTransient->uExitReason;
15533 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15534 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15535 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15536 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15537
15538 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
15539 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15540 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15541 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15542 {
15543 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15544 rcStrict = VINF_SUCCESS;
15545 }
15546 return rcStrict;
15547}
15548
15549
15550/**
15551 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
15552 */
15553HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15554{
15555 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15556
15557 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15558 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15559 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15560 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15561 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15562 AssertRCReturn(rc, rc);
15563
15564 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15565
15566 VMXVEXITINFO ExitInfo;
15567 RT_ZERO(ExitInfo);
15568 ExitInfo.uReason = pVmxTransient->uExitReason;
15569 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15570 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15571 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15572 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
15573
15574 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
15575 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15576 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15577 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15578 {
15579 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15580 rcStrict = VINF_SUCCESS;
15581 }
15582 return rcStrict;
15583}
15584
15585
15586/**
15587 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
15588 */
15589HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15590{
15591 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15592
15593 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15594 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15595 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15596 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15597 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15598 AssertRCReturn(rc, rc);
15599
15600 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15601
15602 VMXVEXITINFO ExitInfo;
15603 RT_ZERO(ExitInfo);
15604 ExitInfo.uReason = pVmxTransient->uExitReason;
15605 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15606 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15607 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15608 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
15609 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
15610
15611 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
15612 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15613 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15614 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15615 {
15616 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15617 rcStrict = VINF_SUCCESS;
15618 }
15619 return rcStrict;
15620}
15621
15622
15623/**
15624 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
15625 */
15626HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15627{
15628 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15629
15630    /* Import the entire VMCS state for now since we would be switching to the nested-guest VMCS
15631       on a successful VMRESUME; otherwise importing just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK would suffice. */
15632 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15633 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15634 AssertRCReturn(rc, rc);
15635
15636 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15637
15638 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbInstr, VMXINSTRID_VMRESUME);
15639 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15640 {
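        /* Report to the caller that a VMLAUNCH/VMRESUME was successfully emulated; since we will be
           switching to the nested-guest VMCS, flag the entire guest state as changed. */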
15641 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
15642 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15643 }
15644 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
15645 return rcStrict;
15646}
15647
15648
15649/**
15650 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
15651 */
15652HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15653{
15654 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15655
15656 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15657 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15658 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15659 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15660 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15661 AssertRCReturn(rc, rc);
15662
15663 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15664
15665 VMXVEXITINFO ExitInfo;
15666 RT_ZERO(ExitInfo);
15667 ExitInfo.uReason = pVmxTransient->uExitReason;
15668 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15669 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15670 ExitInfo.cbInstr = pVmxTransient->cbInstr;
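    /* Note: VMWRITE reads its source value from the memory operand, hence read access below. */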
15671 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
15672 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15673
15674 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
15675 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15676 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15677 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15678 {
15679 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15680 rcStrict = VINF_SUCCESS;
15681 }
15682 return rcStrict;
15683}
15684
15685
15686/**
15687 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
15688 */
15689HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15690{
15691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15692
15693 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15694 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
15695 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
15696 AssertRCReturn(rc, rc);
15697
15698 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15699
15700 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
15701 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15702 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
15703 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15704 {
15705 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15706 rcStrict = VINF_SUCCESS;
15707 }
15708 return rcStrict;
15709}
15710
15711
15712/**
15713 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
15714 */
15715HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15716{
15717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15718
15719 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15720 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15721 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15722 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15723 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15724 AssertRCReturn(rc, rc);
15725
15726 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15727
15728 VMXVEXITINFO ExitInfo;
15729 RT_ZERO(ExitInfo);
15730 ExitInfo.uReason = pVmxTransient->uExitReason;
15731 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15732 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15733 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15734 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15735
15736 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
15737 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15738 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15739 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15740 {
15741 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15742 rcStrict = VINF_SUCCESS;
15743 }
15744 return rcStrict;
15745}
15746
15747
15748/**
15749 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
15750 */
15751HMVMX_EXIT_DECL hmR0VmxExitInvvpid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15752{
15753 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15754
15755 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15756 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15757 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15758 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15759 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15760 AssertRCReturn(rc, rc);
15761
15762 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15763
15764 VMXVEXITINFO ExitInfo;
15765 RT_ZERO(ExitInfo);
15766 ExitInfo.uReason = pVmxTransient->uExitReason;
15767 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15768 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15769 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15770 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15771
15772 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
15773 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15774 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15775 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15776 {
15777 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15778 rcStrict = VINF_SUCCESS;
15779 }
15780 return rcStrict;
15781}
15782
15783/** @} */
15784
15785/** @name Nested-guest VM-exit handlers.
15786 * @{
15787 */
15788/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15789/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15790/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15791
15792/**
15793 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
15794 * Conditional VM-exit.
15795 */
15796HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmiNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15797{
15798 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15799
15800 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
15801 AssertRCReturn(rc, rc);
15802
15803 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
15804 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
15805 uint32_t const uExtIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
15806
15807 switch (uExtIntType)
15808 {
15809 /*
15810 * Physical NMIs:
15811 * We shouldn't direct host physical NMIs to the nested-guest. Dispatch it to the
15812 * host.
15813 */
15814 case VMX_EXIT_INT_INFO_TYPE_NMI:
15815 return hmR0VmxExitHostNmi(pVCpu);
15816
15817 /*
15818     * Hardware exceptions,
15819     * Software exceptions,
15820     * Privileged software exceptions:
15821 * Figure out if the exception must be delivered to the guest or the nested-guest.
15822 */
15823 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
15824 {
15825#if 0
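            /* Disabled sketch of the intended intercept check: uVector and uErrCode would presumably be
               taken from the VM-exit interruption-info and error-code, and pVmcs/fIntercept from the
               nested-guest VMCS state. */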
15826 /* Page-faults are subject to masking using its error code. */
15827 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
15828 if (uVector == X86_XCPT_PF)
15829 {
15830 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
15831 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
15832 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
15833 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
15834 }
15835
15836 /* Consult the exception bitmap for all other hardware exceptions. */
15837 Assert(uVector <= X86_XCPT_LAST);
15838 if (fXcptBitmap & RT_BIT(uVector))
15839 fIntercept = true;
15840#endif
15841 break;
15842 }
15843
15844 /*
15845 * External interrupts:
15846     * Such VM-exits should only happen when the "acknowledge external interrupts on VM-exit"
15847     * control is set. However, we never set this control when executing guests or
15848     * nested-guests; for nested-guests the behavior is emulated while injecting
15849     * interrupts into the guest.
15850 *
15851 * Software interrupts:
15852 * VM-exits cannot be caused by software interrupts.
15853 */
15854 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
15855 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
15856 default:
15857 {
15858 pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;
15859 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
15860 }
15861 }
15862
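    /* Reflecting hardware exceptions to the nested-guest is not implemented yet (see the disabled
       sketch above), so fail the VM-exit handling for now. */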
15863 return VERR_NOT_IMPLEMENTED;
15864}
15865
15866
15867/**
15868 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
15869 * Unconditional VM-exit.
15870 */
15871HMVMX_EXIT_DECL hmR0VmxExitTripleFaultNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15872{
15873 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15874 return IEMExecVmxVmexitTripleFault(pVCpu);
15875}
15876
15877
15878/**
15879 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
15880 */
15881HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindowNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15882{
15883 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15884
15885 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
15886 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
15887 return hmR0VmxExitIntWindow(pVCpu, pVmxTransient);
15888}
15889
15890
15891/**
15892 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
15893 */
15894HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindowNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15895{
15896 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15897
15898 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
15899 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
15900    return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
15901}
15902
15903
15904/**
15905 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
15906 * Unconditional VM-exit.
15907 */
15908HMVMX_EXIT_DECL hmR0VmxExitTaskSwitchNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15909{
15910 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15911
15912 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15913 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15914 rc |= hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
15915 rc |= hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
15916 AssertRCReturn(rc, rc);
15917
15918 VMXVEXITINFO ExitInfo;
15919 RT_ZERO(ExitInfo);
15920 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15921 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15922
15923 VMXVEXITEVENTINFO ExitEventInfo;
15924    RT_ZERO(ExitEventInfo);
15925 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
15926 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
15927 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
15928}
15929
15930
15931/**
15932 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
15933 */
15934HMVMX_EXIT_DECL hmR0VmxExitHltNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15935{
15936 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15937
15938 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
15939 {
15940 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15941 AssertRCReturn(rc, rc);
15942 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
15943 }
15944 return hmR0VmxExitHlt(pVCpu, pVmxTransient);
15945}
15946
15947
15948/**
15949 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
15950 */
15951HMVMX_EXIT_DECL hmR0VmxExitInvlpgNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15952{
15953 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15954
15955 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
15956 {
15957 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15958 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15959 AssertRCReturn(rc, rc);
15960
15961 VMXVEXITINFO ExitInfo;
15962 RT_ZERO(ExitInfo);
15963 ExitInfo.uReason = pVmxTransient->uExitReason;
15964 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15965 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15966 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
15967 }
15968 return hmR0VmxExitInvlpg(pVCpu, pVmxTransient);
15969}
15970
15971
15972/**
15973 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
15974 */
15975HMVMX_EXIT_DECL hmR0VmxExitRdpmcNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15976{
15977 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15978
15979 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
15980 {
15981 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15982 AssertRCReturn(rc, rc);
15983 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
15984 }
15985 return hmR0VmxExitRdpmc(pVCpu, pVmxTransient);
15986}
15987
15988
15989/**
15990 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
15991 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
15992 */
15993HMVMX_EXIT_DECL hmR0VmxExitVmreadVmwriteNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15994{
15995 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15996
15997 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
15998 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
15999
16000 int rc = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16001 AssertRCReturn(rc, rc);
16002
16003 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
16004 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
16005 uint64_t u64FieldEnc = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
16006
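    /* Outside 64-bit mode only the lower 32 bits of the register hold the VMCS field encoding,
       so EFER (asserted as imported below) is needed for the long-mode check. */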
16007 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
16008 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
16009 u64FieldEnc &= UINT64_C(0xffffffff);
16010
16011 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64FieldEnc))
16012 {
16013 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16014 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16015 AssertRCReturn(rc, rc);
16016
16017 VMXVEXITINFO ExitInfo;
16018 RT_ZERO(ExitInfo);
16019 ExitInfo.uReason = pVmxTransient->uExitReason;
16020 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16021 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16022 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
16023 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16024 }
16025
16026 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
16027 return hmR0VmxExitVmread(pVCpu, pVmxTransient);
16028 return hmR0VmxExitVmwrite(pVCpu, pVmxTransient);
16029}
16030
16031
16032/**
16033 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
16034 */
16035HMVMX_EXIT_DECL hmR0VmxExitRdtscNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16036{
16037 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16038
16039 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
16040 {
16041 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16042 AssertRCReturn(rc, rc);
16043 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16044 }
16045
16046 return hmR0VmxExitRdtsc(pVCpu, pVmxTransient);
16047}
16048
16049
16050/**
16051 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
16052 * Conditional VM-exit.
16053 */
16054HMVMX_EXIT_DECL hmR0VmxExitMovCRxNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16055{
16056 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16057
16058 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16059 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16060 AssertRCReturn(rc, rc);
16061
16062 VBOXSTRICTRC rcStrict;
16063 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
16064 switch (uAccessType)
16065 {
16066 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
16067 {
16068 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
16069 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
16070 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
16071 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
16072 if (CPUMIsGuestVmxMovToCr0Cr4InterceptSet(pVCpu, &pVCpu->cpum.GstCtx, iCrReg, uNewCrX))
16073 {
16074 VMXVEXITINFO ExitInfo;
16075 RT_ZERO(ExitInfo);
16076 ExitInfo.uReason = pVmxTransient->uExitReason;
16077 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16078 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16079 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16080 }
16081 else
16082 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg);
16083 break;
16084 }
16085
16086 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
16087 {
16088 /*
16089 * CR0/CR4 reads do not cause VM-exits, the read-shadow is used (subject to masking).
16090 * CR2 reads do not cause a VM-exit.
16091 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
16092 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
16093 */
16094 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
16095 if ( iCrReg == 3
16096 || iCrReg == 8)
16097 {
16098 static const uint32_t s_aCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
16099 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
16100 uint32_t const uIntercept = s_aCrXReadIntercepts[iCrReg];
16101 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, uIntercept))
16102 {
16103 VMXVEXITINFO ExitInfo;
16104 RT_ZERO(ExitInfo);
16105 ExitInfo.uReason = pVmxTransient->uExitReason;
16106 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16107 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16108 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16109 }
16110 else
16111 {
16112 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
16113 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr, iGReg, iCrReg);
16114 }
16115 }
16116 else
16117 {
16118 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
16119 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
16120 }
16121 break;
16122 }
16123
16124 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
16125 {
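            /* CLTS clears CR0.TS; it must cause a VM-exit to the nested hypervisor only when TS is
               owned by it (i.e. set in the CR0 guest/host mask) and the CR0 read shadow has TS set. */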
16126 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
16127 Assert(pVmcsNstGst);
16128 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
16129 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
16130 if ( (uGstHostMask & X86_CR0_TS)
16131 && (uReadShadow & X86_CR0_TS))
16132 {
16133 VMXVEXITINFO ExitInfo;
16134 RT_ZERO(ExitInfo);
16135 ExitInfo.uReason = pVmxTransient->uExitReason;
16136 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16137 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16138 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16139 }
16140 else
16141 rcStrict = hmR0VmxExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr);
16142 break;
16143 }
16144
16145 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
16146 {
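            /* LMSW loads the low four bits of CR0 (the machine-status word). When the source is a
               memory operand, the guest-linear address is fetched so it can be passed along with the
               VM-exit information. */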
16147 RTGCPTR GCPtrEffDst;
16148 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
16149 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
16150 if (fMemOperand)
16151 {
16152 rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
16153 AssertRCReturn(rc, rc);
16154 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
16155 }
16156 else
16157 GCPtrEffDst = NIL_RTGCPTR;
16158
16159 if (CPUMIsGuestVmxLmswInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, uNewMsw))
16160 {
16161 VMXVEXITINFO ExitInfo;
16162 RT_ZERO(ExitInfo);
16163 ExitInfo.uReason = pVmxTransient->uExitReason;
16164 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16165 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
16166 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16167 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16168 }
16169 else
16170 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbInstr, uNewMsw, GCPtrEffDst);
16171 break;
16172 }
16173
16174 default:
16175 {
16176 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
16177 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
16178 }
16179 }
16180
16181 if (rcStrict == VINF_IEM_RAISED_XCPT)
16182 {
16183 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16184 rcStrict = VINF_SUCCESS;
16185 }
16186 return rcStrict;
16187}
16188
16189
16190/**
16191 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
16192 * Conditional VM-exit.
16193 */
16194HMVMX_EXIT_DECL hmR0VmxExitMovDRxNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16195{
16196 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16197
16198 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
16199 {
16200 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16201 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16202 AssertRCReturn(rc, rc);
16203
16204 VMXVEXITINFO ExitInfo;
16205        RT_ZERO(ExitInfo);
             ExitInfo.uReason = pVmxTransient->uExitReason;
16206        ExitInfo.cbInstr = pVmxTransient->cbInstr;
16207 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16208 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16209 }
16210 return hmR0VmxExitMovDRx(pVCpu, pVmxTransient);
16211}
16212
16213
16214/**
16215 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
16216 * Conditional VM-exit.
16217 */
16218HMVMX_EXIT_DECL hmR0VmxExitIoInstrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16219{
16220 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16221
16222 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16223 AssertRCReturn(rc, rc);
16224
16225 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
16226 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
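    /* The size-of-access field encodes 0 = 1 byte, 1 = 2 bytes and 3 = 4 bytes; 2 is not a valid
       encoding, hence the assertion below and the zero entry in the size table. */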
16227 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
16228
16229 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
16230 uint8_t const cbAccess = s_aIOSizes[uIOSize];
16231 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
16232 {
16233 /*
16234 * IN/OUT instruction:
16235 * - Provides VM-exit instruction length.
16236 *
16237 * INS/OUTS instruction:
16238 * - Provides VM-exit instruction length.
16239 * - Provides Guest-linear address.
16240 * - Optionally provides VM-exit instruction info (depends on CPU feature).
16241 */
16242 PVM pVM = pVCpu->CTX_SUFF(pVM);
16243 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16244 AssertRCReturn(rc, rc);
16245
16246        /* Make sure we don't use stale/uninitialized VMX-transient info below. */
16247 pVmxTransient->ExitInstrInfo.u = 0;
16248 pVmxTransient->uGuestLinearAddr = 0;
16249
16250 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
16251 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
16252 if (fIOString)
16253 {
16254 rc |= hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
16255 if (fVmxInsOutsInfo)
16256 {
16257 Assert(RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
16258 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16259 }
16260 }
16261 AssertRCReturn(rc, rc);
16262
16263 VMXVEXITINFO ExitInfo;
16264 RT_ZERO(ExitInfo);
16265 ExitInfo.uReason = pVmxTransient->uExitReason;
16266 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16267 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16268 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
16269 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
16270 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16271 }
16272 return hmR0VmxExitIoInstr(pVCpu, pVmxTransient);
16273}
16274
16275
16276/**
16277 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
16278 */
16279HMVMX_EXIT_DECL hmR0VmxExitRdmsrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16280{
16281 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16282
16283 uint32_t fMsrpm;
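    /* Consult the nested-guest MSR bitmap if it is in use; otherwise every RDMSR unconditionally
       causes a VM-exit to the nested hypervisor. */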
16284 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
16285 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), pVCpu->cpum.GstCtx.ecx);
16286 else
16287 fMsrpm = VMXMSRPM_EXIT_RD;
16288
16289 if (fMsrpm & VMXMSRPM_EXIT_RD)
16290 {
16291 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16292 AssertRCReturn(rc, rc);
16293 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16294 }
16295 return hmR0VmxExitRdmsr(pVCpu, pVmxTransient);
16296}
16297
16298
16299/**
16300 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
16301 */
16302HMVMX_EXIT_DECL hmR0VmxExitWrmsrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16303{
16304 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16305
16306 uint32_t fMsrpm;
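    /* As with RDMSR above: use the nested-guest MSR bitmap if present, otherwise WRMSR always exits. */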
16307 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
16308 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), pVCpu->cpum.GstCtx.ecx);
16309 else
16310 fMsrpm = VMXMSRPM_EXIT_WR;
16311
16312 if (fMsrpm & VMXMSRPM_EXIT_WR)
16313 {
16314 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16315 AssertRCReturn(rc, rc);
16316 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16317 }
16318 return hmR0VmxExitWrmsr(pVCpu, pVmxTransient);
16319}
16320
16321
16322/**
16323 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
16324 */
16325HMVMX_EXIT_DECL hmR0VmxExitMwaitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16326{
16327 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16328
16329 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
16330 {
16331 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16332 AssertRCReturn(rc, rc);
16333 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16334 }
16335 return hmR0VmxExitMwait(pVCpu, pVmxTransient);
16336}
16337
16338
16339/**
16340 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
16341 * VM-exit.
16342 */
16343HMVMX_EXIT_DECL hmR0VmxExitMtfNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16344{
16345 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16346
16347 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
16348 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
16349}
16350
16351
16352/**
16353 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
16354 */
16355HMVMX_EXIT_DECL hmR0VmxExitMonitorNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16356{
16357 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16358
16359 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
16360 {
16361 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16362 AssertRCReturn(rc, rc);
16363 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16364 }
16365 return hmR0VmxExitMonitor(pVCpu, pVmxTransient);
16366}
16367
16368
16369/**
16370 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
16371 */
16372HMVMX_EXIT_DECL hmR0VmxExitPauseNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16373{
16374 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16375
16376 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
16377 * PAUSE when executing a nested-guest? If it does not, we would not need
16378 * to check for the intercepts here. Just call VM-exit... */
16379
16380 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
16381 if ( CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
16382 || CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
16383 {
16384 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16385 AssertRCReturn(rc, rc);
16386 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16387 }
16388 return hmR0VmxExitPause(pVCpu, pVmxTransient);
16389}
16390
16391
16392/**
16393 * Nested-guest VM-exit handler for when the TPR value is lowered below the
16394 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
16395 */
16396HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThresholdNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16397{
16398 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16399
16400 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
16401 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
16402 return hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient);
16403}
16404
16405
16406/**
16407 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
16408 * VM-exit.
16409 */
16410HMVMX_EXIT_DECL hmR0VmxExitApicAccessNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16411{
16412 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16413
16414 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16415 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16416 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16417 rc |= hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
16418 rc |= hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
16419 AssertRCReturn(rc, rc);
16420
16421 VMXVEXITINFO ExitInfo;
16422 RT_ZERO(ExitInfo);
16423 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16424 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16425
16426 VMXVEXITEVENTINFO ExitEventInfo;
16427    RT_ZERO(ExitEventInfo);
16428 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
16429 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
16430 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
16431}
16432
16433
16434/**
16435 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
16436 * Conditional VM-exit.
16437 */
16438HMVMX_EXIT_DECL hmR0VmxExitApicWriteNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16439{
16440 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16441
16442 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
16443 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16444 AssertRCReturn(rc, rc);
16445
16446 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
16447}
16448
16449
16450/**
16451 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
16452 * Conditional VM-exit.
16453 */
16454HMVMX_EXIT_DECL hmR0VmxExitVirtEoiNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16455{
16456 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16457
16458 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
16459 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16460 AssertRCReturn(rc, rc);
16461
16462 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
16463}
16464
16465
16466/**
16467 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
16468 */
16469HMVMX_EXIT_DECL hmR0VmxExitRdtscpNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16470{
16471 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16472
16473 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
16474 {
16475 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
16476 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16477 AssertRCReturn(rc, rc);
16478 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16479 }
16480 return hmR0VmxExitRdtscp(pVCpu, pVmxTransient);
16481}
16482
16483
16484/**
16485 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
16486 */
16487HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvdNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16488{
16489 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16490
16491 if (CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
16492 {
16493 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16494 AssertRCReturn(rc, rc);
16495 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16496 }
16497 return hmR0VmxExitWbinvd(pVCpu, pVmxTransient);
16498}
16499
16500
16501/**
16502 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
16503 */
16504HMVMX_EXIT_DECL hmR0VmxExitInvpcidNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16505{
16506 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16507
16508 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
16509 {
16510 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
16511 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16512 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16513 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16514 AssertRCReturn(rc, rc);
16515
16516 VMXVEXITINFO ExitInfo;
16517 RT_ZERO(ExitInfo);
16518 ExitInfo.uReason = pVmxTransient->uExitReason;
16519 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16520 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16521 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
16522 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16523 }
16524 return hmR0VmxExitInvpcid(pVCpu, pVmxTransient);
16525}
16526
16527
16528/**
16529 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
16530 * and only provide the instruction length.
16531 *
16532 * Unconditional VM-exit.
16533 */
16534HMVMX_EXIT_DECL hmR0VmxExitInstrNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16535{
16536 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16537
16538#ifdef VBOX_STRICT
16539 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
16540 switch (pVmxTransient->uExitReason)
16541 {
16542 case VMX_EXIT_ENCLS:
16543 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
16544 break;
16545
16546 case VMX_EXIT_VMFUNC:
16547 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, pCtx, VMX_PROC_CTLS2_VMFUNC));
16548 break;
16549 }
16550#endif
16551
16552 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16553 AssertRCReturn(rc, rc);
16554 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbInstr);
16555}
16556
16557
16558/**
16559 * Nested-guest VM-exit handler for instructions that provide instruction length as
16560 * well as more information.
16561 *
16562 * Unconditional VM-exit.
16563 */
16564HMVMX_EXIT_DECL hmR0VmxExitInstrWithInfoNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
16565{
16566 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16567
16568#ifdef VBOX_STRICT
16569 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
16570 switch (pVmxTransient->uExitReason)
16571 {
16572 case VMX_EXIT_GDTR_IDTR_ACCESS:
16573 case VMX_EXIT_LDTR_TR_ACCESS:
16574 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
16575 break;
16576
16577 case VMX_EXIT_RDRAND:
16578 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
16579 break;
16580
16581 case VMX_EXIT_RDSEED:
16582 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
16583 break;
16584
16585 case VMX_EXIT_XSAVES:
16586 case VMX_EXIT_XRSTORS:
16587 /** @todo NSTVMX: Verify XSS-bitmap. */
16588 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
16589 break;
16590
16591 case VMX_EXIT_UMWAIT:
16592 case VMX_EXIT_TPAUSE:
16593 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
16594 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
16595 break;
16596 }
16597#endif
16598
16599 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16600 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
16601 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16602 AssertRCReturn(rc, rc);
16603
16604 VMXVEXITINFO ExitInfo;
16605 RT_ZERO(ExitInfo);
16606 ExitInfo.uReason = pVmxTransient->uExitReason;
16607 ExitInfo.cbInstr = pVmxTransient->cbInstr;
16608 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16609 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
16610 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16611}
16612
16613/** @} */
16614
16615#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16616